Package web2py :: Package gluon :: Module dal
[hide private]
[frames] | no frames]

Source Code for Module web2py.gluon.dal

   1  #!/usr/bin/env python 
   2  # -*- coding: utf-8 -*- 
   3   
   4  """ 
   5  This file is part of the web2py Web Framework 
   6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
   7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
   8   
   9  Thanks to 
  10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
  11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
  12      * Denes 
  13      * Chris Clark 
  14      * clach05 
  15      * Denes Lengyel 
  16      * and many others who have contributed to current and previous versions 
  17   
  18  This file contains the DAL support for many relational databases, 
  19  including SQLite, MySQL, Postgres, Oracle, MS SQL, DB2, Interbase, Ingres 
  20   
  21  Completely refactored by MDP on Dec, 2010 
  22   
  23  TODO: 
  24  - create more functions in adapters to abstract more 
  25  - fix insert, create, migrate 
  26  - move startswith, endswith, contains into adapters 
  27  - handle _lastsql (where?) 
  28   
  29  Example of usage: 
  30   
  31  >>> # from dal import DAL, Field 
  32   
  33  ### create DAL connection (and create DB if not exists) 
  34  >>> db=DAL(('mysql://a:b@locahost/x','sqlite://storage.sqlite'),folder=None) 
  35   
  36  ### define a table 'person' (create/alter as necessary) 
  37  >>> person = db.define_table('person',Field('name','string')) 
  38   
  39  ### insert a record 
  40  >>> id = person.insert(name='James') 
  41   
  42  ### retrieve it by id 
  43  >>> james = person(id) 
  44   
  45  ### retrieve it by name 
  46  >>> james = person(name='James') 
  47   
  48  ### retrieve it by arbitrary query 
  49  >>> query = (person.name=='James')&(person.name.startswith('J')) 
  50  >>> james = db(query).select(person.ALL)[0] 
  51   
  52  ### update one record 
  53  >>> james.update_record(name='Jim') 
  54   
  55  ### update multiple records by query 
  56  >>> db(person.name.like('J%')).update(name='James') 
  57  1 
  58   
  59  ### delete records by query 
  60  >>> db(person.name.lower()=='jim').delete() 
  61  0 
  62   
  63  ### retrieve multiple records (rows) 
  64  >>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100)) 
  65   
  66  ### further filter them 
  67  >>> james = people.find(lambda row: row.name=='James').first() 
  68  >>> print james.id, james.name 
  69  1 James 
  70   
  71  ### check aggregates 
  72  >>> counter = person.id.count() 
  73  >>> print db(person).select(counter).first()(counter) 
  74  1 
  75   
  76  ### delete one record 
  77  >>> james.delete_record() 
  78  1 
  79   
  80  ### delete (drop) entire database table 
  81  >>> person.drop() 
  82   
  83  Supported field types: 
  84  id string text boolean integer double decimal password upload blob time date datetime, 
  85   
  86  Supported DAL URI strings: 
  87  'sqlite://test.db' 
  88  'sqlite:memory' 
  89  'jdbc:sqlite://test.db' 
  90  'mysql://root:none@localhost/test' 
  91  'postgres://mdipierro:none@localhost/test' 
  92  'jdbc:postgres://mdipierro:none@localhost/test' 
  93  'mssql://web2py:none@A64X2/web2py_test' 
  94  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  95  'oracle://username:password@database' 
  96  'firebird://user:password@server:3050/database' 
  97  'db2://DSN=dsn;UID=user;PWD=pass' 
  98  'firebird://username:password@hostname/database' 
  99  'firebird_embedded://username:password@c://path' 
 100  'informix://user:password@server:3050/database' 
 101  'informixu://user:password@server:3050/database' # unicode informix 
 102  'google:datastore' # for google app engine datastore 
 103  'google:sql' # for google app engine with sql (mysql compatible) 
 104   
 105  For more info: 
 106  help(DAL) 
 107  help(Field) 
 108  """ 
 109   
 110  ################################################################################### 
 111  # this file only exposes DAL and Field 
 112  ################################################################################### 
 113   
__all__ = ['DAL', 'Field']  # public API: everything else here is implementation detail
MAXCHARLENGTH = 512  # default CHAR column length for engines that need a bound
INFINITY = 32768 # not quite but reasonable default max varchar length
 117   
 118  import re 
 119  import sys 
 120  import locale 
 121  import os 
 122  import types 
 123  import cPickle 
 124  import datetime 
 125  import threading 
 126  import time 
 127  import cStringIO 
 128  import csv 
 129  import copy 
 130  import socket 
 131  import logging 
 132  import copy_reg 
 133  import base64 
 134  import shutil 
 135  import marshal 
 136  import decimal 
 137  import struct 
 138  import urllib 
 139  import hashlib 
 140   
# every callable kind accepted for Field defaults/compute/update callbacks
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)
 143   
 144   
 145  ################################################################################### 
 146  # following checks allows running of dal without web2py as a standalone module 
 147  ################################################################################### 
 148  try: 
 149      from utils import web2py_uuid 
 150  except ImportError: 
 151      import uuid 
def web2py_uuid():
    """Fallback when running standalone: return a random UUID4 string."""
    return str(uuid.uuid4())
153 154 try: 155 import portalocker 156 have_portalocker = True 157 except ImportError: 158 have_portalocker = False 159 160 try: 161 import serializers 162 have_serializers = True 163 except ImportError: 164 have_serializers = False 165 166 try: 167 import validators 168 have_validators = True 169 except ImportError: 170 have_validators = False 171 172 logger = logging.getLogger("web2py.dal") 173 DEFAULT = lambda:0 174 175 sql_locker = threading.RLock() 176 thread = threading.local() 177 178 # internal representation of tables with field 179 # <table>.<field>, tables and fields may only be [a-zA-Z0-0_] 180 181 regex_dbname = re.compile('^(\w+)(\:\w+)*') 182 table_field = re.compile('^[\w_]+\.[\w_]+$') 183 regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 184 regex_cleanup_fn = re.compile('[\'"\s;]+') 185 string_unpack=re.compile('(?<!\|)\|(?!\|)') 186 regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 187 188 189 190 # list of drivers will be built on the fly 191 # and lists only what is available 192 drivers = [] 193 194 try: 195 from pysqlite2 import dbapi2 as sqlite3 196 drivers.append('pysqlite2') 197 except ImportError: 198 try: 199 from sqlite3 import dbapi2 as sqlite3 200 drivers.append('SQLite3') 201 except ImportError: 202 logger.debug('no sqlite3 or pysqlite2.dbapi2 driver') 203 204 try: 205 import contrib.pymysql as pymysql 206 drivers.append('pymysql') 207 except ImportError: 208 logger.debug('no pymysql driver') 209 210 try: 211 import psycopg2 212 drivers.append('PostgreSQL') 213 except ImportError: 214 logger.debug('no psycopg2 driver') 215 216 try: 217 import cx_Oracle 218 drivers.append('Oracle') 219 except ImportError: 220 logger.debug('no cx_Oracle driver') 221 222 try: 223 import pyodbc 224 drivers.append('MSSQL/DB2') 225 except 
ImportError: 226 logger.debug('no MSSQL/DB2 driver') 227 228 try: 229 import kinterbasdb 230 drivers.append('Interbase') 231 except ImportError: 232 logger.debug('no kinterbasdb driver') 233 234 try: 235 import informixdb 236 drivers.append('Informix') 237 logger.warning('Informix support is experimental') 238 except ImportError: 239 logger.debug('no informixdb driver') 240 241 try: 242 import sapdb 243 drivers.append('SAPDB') 244 logger.warning('SAPDB support is experimental') 245 except ImportError: 246 logger.debug('no sapdb driver') 247 248 try: 249 from com.ziclix.python.sql import zxJDBC 250 import java.sql 251 from org.sqlite import JDBC # required later by java.sql; ensure we have it 252 drivers.append('zxJDBC') 253 logger.warning('zxJDBC support is experimental') 254 is_jdbc = True 255 except ImportError: 256 logger.debug('no zxJDBC driver') 257 is_jdbc = False 258 259 try: 260 import ingresdbi 261 drivers.append('Ingres') 262 except ImportError: 263 logger.debug('no Ingres driver') 264 # NOTE could try JDBC....... 265 266 try: 267 from new import classobj 268 from google.appengine.ext import db as gae 269 from google.appengine.api import namespace_manager, rdbms 270 from google.appengine.api.datastore_types import Key ### needed for belongs on ID 271 from google.appengine.ext.db.polymodel import PolyModel 272 273 drivers.append('google') 274
275 - class GAEDecimalProperty(gae.Property):
276 """ 277 GAE decimal implementation 278 """ 279 data_type = decimal.Decimal 280
281 - def __init__(self, precision, scale, **kwargs):
282 super(GAEDecimalProperty, self).__init__(self, **kwargs) 283 d = '1.' 284 for x in range(scale): 285 d += '0' 286 self.round = decimal.Decimal(d)
287
288 - def get_value_for_datastore(self, model_instance):
289 value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance) 290 if value: 291 return str(value) 292 else: 293 return None
294
295 - def make_value_from_datastore(self, value):
296 if value: 297 return decimal.Decimal(value).quantize(self.round) 298 else: 299 return None
300
301 - def validate(self, value):
302 value = super(GAEDecimalProperty, self).validate(value) 303 if value is None or isinstance(value, decimal.Decimal): 304 return value 305 elif isinstance(value, basestring): 306 return decimal.Decimal(value) 307 raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
308 309 except ImportError: 310 pass 311 312 ################################################################################### 313 # class that handles connection pooling (all adapters derived form this one) 314 ################################################################################### 315
class ConnectionPool(object):
    """Per-URI connection pooling mixin; all adapters derive from it."""

    pools = {}  # class-level map: uri -> list of idle connections

    @staticmethod
    def set_folder(folder):
        """Record the working folder in thread-local storage."""
        thread.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        if not hasattr(thread,'instances'):
            return
        while thread.instances:
            instance = thread.instances.pop()
            getattr(instance,action)()
            # ## if you want pools, recycle this connection
            really = True
            if instance.pool_size:
                # try/finally so the lock is released even if the pool
                # lookup fails (the original could leave it held forever)
                sql_locker.acquire()
                try:
                    pool = ConnectionPool.pools[instance.uri]
                    if len(pool) < instance.pool_size:
                        pool.append(instance.connection)
                        really = False
                finally:
                    sql_locker.release()
            if really:
                getattr(instance,'close')()
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        if hasattr(thread,'folder'):
            self.folder = thread.folder
        else:
            self.folder = thread.folder = ''

        # Creating the folder if it does not exist (deliberately disabled)
        if False and self.folder and not os.path.exists(self.folder):
            # BUGFIX: was os.mkdir(self._folder); `_folder` is never set and
            # would raise AttributeError if this branch were ever enabled
            os.mkdir(self.folder)

    def pool_connection(self, f):
        """Reuse an idle pooled connection for self.uri, or create one via f()."""
        if not self.pool_size:
            self.connection = f()
        else:
            uri = self.uri
            sql_locker.acquire()
            try:
                if not uri in ConnectionPool.pools:
                    ConnectionPool.pools[uri] = []
                if ConnectionPool.pools[uri]:
                    self.connection = ConnectionPool.pools[uri].pop()
                else:
                    self.connection = None
            finally:
                sql_locker.release()
            # create outside the lock so a slow connect does not block others
            if self.connection is None:
                self.connection = f()
        if not hasattr(thread,'instances'):
            thread.instances = []
        thread.instances.append(self)
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################
class BaseAdapter(ConnectionPool):
    """Generic do-nothing adapter; every concrete adapter derives from it."""

    # conservative capability flags; concrete adapters override as needed
    maxcharlength = INFINITY
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    # map of web2py field types to generic SQL column types; the %(...)s
    # placeholders are substituted while building CREATE TABLE statements
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }
407 - def file_exists(self, filename):
408 """ 409 to be used ONLY for files that on GAE may not be on filesystem 410 """ 411 return os.path.exists(filename)
412
413 - def file_open(self, filename, mode='rb', lock=True):
414 """ 415 to be used ONLY for files that on GAE may not be on filesystem 416 """ 417 fileobj = open(filename,mode) 418 if have_portalocker and lock: 419 if mode in ('r','rb'): 420 portalocker.lock(fileobj,portalocker.LOCK_SH) 421 elif mode in ('w','wb','a'): 422 portalocker.lock(fileobj,portalocker.LOCK_EX) 423 else: 424 raise RuntimeError, "Unsupported file_open mode" 425 return fileobj
426
427 - def file_close(self, fileobj, unlock=True):
428 """ 429 to be used ONLY for files that on GAE may not be on filesystem 430 """ 431 if fileobj: 432 if have_portalocker and unlock: 433 portalocker.unlock(fileobj) 434 fileobj.close()
435
436 - def file_delete(self, filename):
437 os.unlink(filename)
438
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Store connection settings and install no-op connection/cursor stubs.

        NOTE(review): driver_args={} is a shared mutable default — subclasses
        should avoid mutating it in place.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        # placeholder connection/cursor so an unconnected BaseAdapter is
        # harmless: any method call returns [], lastrowid is always 1
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
454 - def sequence_name(self,tablename):
455 return '%s_sequence' % tablename
456
457 - def trigger_name(self,tablename):
458 return '%s_sequence' % tablename
459 460
    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        """Build (and optionally run) the CREATE TABLE statement for `table`.

        Returns the SQL string. When `migrate` is enabled, also maintains the
        <hash>_<tablename>.table pickle used to detect schema changes and
        calls migrate_table() when the stored schema differs.
        """
        fields = []
        sql_fields = {}      # fieldname -> column DDL, used for migration diffs
        sql_fields_aux = {}  # same but with DEFAULT folded in, used for CREATE
        TFK = {}             # table-level (multi-column) foreign keys
        tablename = table._tablename
        for field in table:
            k = field.name
            if isinstance(field.type,SQLCustomType):
                ftype = field.type.native or field.type.type
            elif field.type.startswith('reference'):
                referenced = field.type[10:].strip()
                constraint_name = self.constraint_name(tablename, field.name)
                if hasattr(table,'_primarykey'):
                    rtablename,rfieldname = referenced.split('.')
                    rtable = table._db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or rfield.unique:
                        ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey)>1 :
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field.name
                        else:
                            ftype = ftype + \
                                self.types['reference FK'] %dict(\
                                constraint_name=constraint_name,
                                table_name=tablename,
                                field_name=field.name,
                                foreign_key='%s (%s)'%(rtablename, rfieldname),
                                on_delete_action=field.ondelete)
                else:
                    # reference to the first field (the id) of the target table
                    ftype = self.types[field.type[:9]]\
                        % dict(table_name=tablename,
                               field_name=field.name,
                               constraint_name=constraint_name,
                               foreign_key=referenced + ('(%s)' % table._db[referenced].fields[0]),
                               on_delete_action=field.ondelete)
            elif field.type.startswith('list:reference'):
                ftype = self.types[field.type[:14]]
            elif field.type.startswith('decimal'):
                precision, scale = [int(x) for x in field.type[8:-1].split(',')]
                ftype = self.types[field.type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif not field.type in self.types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field.type, field.name)
            else:
                ftype = self.types[field.type]\
                     % dict(length=field.length)
            if not field.type.startswith('id') and not field.type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'

            # add to list of fields
            sql_fields[field.name] = ftype

            if field.default!=None:
                # caveat: sql_fields and sql_fields_aux differ for default values
                # sql_fields is used to trigger migrations and sql_fields_aux
                # are used for create table
                # the reason is that we do not want to trigger a migration simply
                # because a default value changes
                not_null = self.NOT_NULL(field.default,field.type)
                ftype = ftype.replace('NOT NULL',not_null)
            sql_fields_aux[field.name] = ftype

            fields.append('%s %s' % (field.name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table.fields[0])
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append table-level foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = table._db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                self.types['reference TFK'] %\
                dict(table_name=tablename,
                field_name=', '.join(fkeys),
                foreign_table=rtablename,
                foreign_key=', '.join(pkeys),
                on_delete_action=field.ondelete)

        if hasattr(table,'_primarykey'):
            query = '''CREATE TABLE %s(\n    %s,\n    %s) %s''' % \
                (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = '''CREATE TABLE %s(\n    %s\n)%s''' % \
                (tablename, fields, other)

        # migration metadata lives next to the sqlite db file, else in folder
        if self.uri.startswith('sqlite:///'):
            path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = os.path.join(dbpath, migrate)
        else:
            table._dbt = os.path.join(dbpath, '%s_%s.table' \
                    % (hashlib.md5(self.uri).hexdigest(), tablename))
        if table._dbt:
            table._loggername = os.path.join(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first creation: run DDL and persist the schema pickle
            if table._dbt:
                logfile.write('timestamp: %s\n'
                               % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # table metadata exists: compare schemas and migrate if changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
616
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """ALTER the table so its columns match `sql_fields`.

        Diffs the stored schema (`sql_fields_old`) against the requested one,
        issuing ADD/DROP/ALTER statements and updating the .table pickle as it
        goes. On sqlite only ADDs are performed (sqlite cannot drop/alter).
        """
        tablename = table._tablename
        ### make sure all field names are lower case to avoid conflicts
        sql_fields = dict((k.lower(), v) for k, v in sql_fields.items())
        sql_fields_old = dict((k.lower(), v) for k, v in sql_fields_old.items())
        sql_fields_aux = dict((k.lower(), v) for k, v in sql_fields_aux.items())

        # union of new and old field names
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        # mssql cannot chain ", ADD" inside one ALTER; start a new statement
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '

        fields_changed = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            if not key in sql_fields_old:
                # brand new column
                sql_fields_current[key] = sql_fields[key]
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key, sql_fields_aux[key].replace(', ', new_add))]
            elif self.dbengine == 'sqlite':
                # sqlite cannot drop or retype columns
                query = None
            elif not key in sql_fields:
                # column removed from the model
                del sql_fields_current[key]
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
            elif sql_fields[key] != sql_fields_old[key] \
                  and not isinstance(table[key].type, SQLCustomType) \
                  and not (table[key].type.startswith('reference') and \
                      sql_fields[key].startswith('INT,') and \
                      sql_fields_old[key].startswith('INT NOT NULL,')):
                # type changed: copy data through a __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
            else:
                query = None

            if query:
                fields_changed = True
                logfile.write('timestamp: %s\n'
                               % datetime.datetime.today().isoformat())
                table._db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # caveat. mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if table._db._adapter.commit_on_alter_table:
                            table._db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                        logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')

        if fields_changed and not self.dbengine in ['mysql','oracle','firebird']:
            table._db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
707
708 - def LOWER(self,first):
709 return 'LOWER(%s)' % self.expand(first)
710
711 - def UPPER(self,first):
712 return 'UPPER(%s)' % self.expand(first)
713
714 - def EXTRACT(self,first,what):
715 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
716
717 - def AGGREGATE(self,first,what):
718 return "%s(%s)" % (what,self.expand(first))
719
720 - def LEFT_JOIN(self):
721 return 'LEFT JOIN'
722
723 - def RANDOM(self):
724 return 'Random()'
725
726 - def NOT_NULL(self,default,field_type):
727 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
728
729 - def COALESCE_ZERO(self,first):
730 return 'COALESCE(%s,0)' % self.expand(first)
731
732 - def ALLOW_NULL(self):
733 return ''
734
735 - def SUBSTRING(self,field,parameters):
736 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
737
738 - def PRIMARY_KEY(self,key):
739 return 'PRIMARY KEY(%s)' % key
740
741 - def _drop(self,table,mode):
742 return ['DROP TABLE %s;' % table]
743
    def drop(self, table, mode=''):
        """DROP the table, unregister it from the DAL and remove its migration file."""
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        table._db.commit()
        # unregister the table from the DAL instance and fix back-references
        del table._db[table._tablename]
        del table._db.tables[table._db.tables.index(table._tablename)]
        table._db._update_referenced_by(table._tablename)
        if table._dbt:
            # remove migration metadata so a re-create starts clean
            self.file_delete(table._dbt)
            logfile.write('success!\n')
759
760 - def _insert(self,table,fields):
761 keys = ','.join(f.name for f,v in fields) 762 values = ','.join(self.expand(v,f.type) for f,v in fields) 763 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
764
    def insert(self,table,fields):
        """Execute an INSERT; returns a Reference to the new id, the raw id for
        non-integer keys, a dict of key values for keyed tables, or None when
        an integrity error occurs."""
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception, e:
            # integrity violations are reported as None, not raised
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict( [ (k,fields[k]) for k in table._primarykey ])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        # wrap integer ids in a lazy Reference so row attributes resolve later
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
781
782 - def bulk_insert(self,table,items):
783 return [self.insert(table,item) for item in items]
784
785 - def NOT(self,first):
786 return '(NOT %s)' % self.expand(first)
787
788 - def AND(self,first,second):
789 return '(%s AND %s)' % (self.expand(first),self.expand(second))
790
791 - def OR(self,first,second):
792 return '(%s OR %s)' % (self.expand(first),self.expand(second))
793
794 - def BELONGS(self,first,second):
795 if isinstance(second,str): 796 return '(%s IN (%s))' % (self.expand(first),second[:-1]) 797 return '(%s IN (%s))' % (self.expand(first), 798 ','.join(self.expand(item,first.type) for item in second))
799
800 - def LIKE(self,first,second):
801 return '(%s LIKE %s)' % (self.expand(first),self.expand(second,'string'))
802
803 - def STARTSWITH(self,first,second):
804 return '(%s LIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
805
806 - def ENDSWITH(self,first,second):
807 return '(%s LIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
808
809 - def CONTAINS(self,first,second):
810 if first.type in ('string','text'): 811 key = '%'+str(second).replace('%','%%')+'%' 812 elif first.type.startswith('list:'): 813 key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' 814 return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
815
816 - def EQ(self,first,second=None):
817 if second is None: 818 return '(%s IS NULL)' % self.expand(first) 819 return '(%s = %s)' % (self.expand(first),self.expand(second,first.type))
820
821 - def NE(self,first,second=None):
822 if second is None: 823 return '(%s IS NOT NULL)' % self.expand(first) 824 return '(%s <> %s)' % (self.expand(first),self.expand(second,first.type))
825
826 - def LT(self,first,second=None):
827 return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))
828
829 - def LE(self,first,second=None):
830 return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))
831
832 - def GT(self,first,second=None):
833 return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))
834
835 - def GE(self,first,second=None):
836 return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))
837
838 - def ADD(self,first,second):
839 return '(%s + %s)' % (self.expand(first),self.expand(second,first.type))
840
841 - def SUB(self,first,second):
842 return '(%s - %s)' % (self.expand(first),self.expand(second,first.type))
843
844 - def MUL(self,first,second):
845 return '(%s * %s)' % (self.expand(first),self.expand(second,first.type))
846
847 - def DIV(self,first,second):
848 return '(%s / %s)' % (self.expand(first),self.expand(second,first.type))
849
850 - def MOD(self,first,second):
851 return '(%s %% %s)' % (self.expand(first),self.expand(second,first.type))
852
853 - def AS(self,first,second):
854 return '(%s AS %s)' % (self.expand(first),second)
855
856 - def ON(self,first,second):
857 return '%s ON %s' % (self.expand(first),self.expand(second))
858
859 - def INVERT(self,first):
860 return '%s DESC' % self.expand(first)
861
862 - def COMMA(self,first,second):
863 return '%s, %s' % (self.expand(first),self.expand(second))
864
865 - def expand(self,expression,field_type=None):
866 if isinstance(expression,Field): 867 return str(expression) 868 elif isinstance(expression, (Expression, Query)): 869 if not expression.second is None: 870 return expression.op(expression.first, expression.second) 871 elif not expression.first is None: 872 return expression.op(expression.first) 873 else: 874 return expression.op() 875 elif field_type: 876 return self.represent(expression,field_type) 877 elif isinstance(expression,(list,tuple)): 878 return ','.join([self.represent(item,field_type) for item in expression]) 879 else: 880 return str(expression)
881
882 - def alias(self,table,alias):
883 """ 884 given a table object, makes a new table object 885 with alias name. 886 """ 887 other = copy.copy(table) 888 other['_ot'] = other._tablename 889 other['ALL'] = SQLALL(other) 890 other['_tablename'] = alias 891 for fieldname in other.fields: 892 other[fieldname] = copy.copy(other[fieldname]) 893 other[fieldname]._tablename = alias 894 other[fieldname].tablename = alias 895 other[fieldname].table = other 896 table._db[alias] = table 897 return other
898
899 - def _truncate(self,table,mode = ''):
900 tablename = table._tablename 901 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
902
903 - def truncate(self,table,mode= ' '):
904 if table._dbt: 905 logfile = self.file_open(table._loggername, 'a') 906 queries = table._db._adapter._truncate(table, mode) 907 for query in queries: 908 if table._dbt: 909 logfile.write(query + '\n') 910 self.execute(query) 911 table._db.commit() 912 if table._dbt: 913 logfile.write('success!\n')
914
915 - def _update(self,tablename,query,fields):
916 if query: 917 sql_w = ' WHERE ' + self.expand(query) 918 else: 919 sql_w = '' 920 sql_v = ','.join(['%s=%s' % (field.name, self.expand(value,field.type)) for (field,value) in fields]) 921 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
922
923 - def update(self,tablename,query,fields):
924 sql = self._update(tablename,query,fields) 925 self.execute(sql) 926 try: 927 return self.cursor.rowcount 928 except: 929 return None
930
931 - def _delete(self,tablename, query):
932 if query: 933 sql_w = ' WHERE ' + self.expand(query) 934 else: 935 sql_w = '' 936 return 'DELETE FROM %s%s;' % (tablename, sql_w)
937
    def delete(self,tablename,query):
        """Execute a DELETE for `query`; returns the deleted row count (or None).

        On sqlite, which lacks ON DELETE CASCADE enforcement here, referencing
        rows with ondelete=='CASCADE' are deleted manually afterwards.
        """
        sql = self._delete(tablename,query)
        ### special code to handle CASCADE in SQLite
        db = self.db
        table = db[tablename]
        if self.dbengine=='sqlite' and table._referenced_by:
            # snapshot the ids about to disappear, BEFORE executing the delete
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite
        if self.dbengine=='sqlite' and counter:
            for tablename,fieldname in table._referenced_by:
                f = db[tablename][fieldname]
                if f.type=='reference '+table._tablename and f.ondelete=='CASCADE':
                    db(db[tablename][fieldname].belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite
        return counter
959
960 - def get_table(self,query):
961 tablenames = self.tables(query) 962 if len(tablenames)==1: 963 return tablenames[0] 964 elif len(tablenames)<1: 965 raise RuntimeError, "No table selected" 966 else: 967 raise RuntimeError, "Too many tables selected"
968
    def _select(self, query, fields, attributes):
        """Build the full SELECT statement for query/fields/attributes.

        Also records self._colnames (the column labels later used by parse()).
        Supported attributes: orderby, groupby, limitby, required, cache,
        left, distinct, having.
        """
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        # ## if not fields specified take them all from the requested tables
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        tablenames = self.tables(query)
        if not fields:
            for table in tablenames:
                for field in self.db[table]:
                    fields.append(field)
        else:
            for field in fields:
                # 'table.field' strings are resolved to Field objects
                if isinstance(field,basestring) and table_field.match(field):
                    tn,fn = field.split('.')
                    field = self.db[tn][fn]
                for tablename in self.tables(field):
                    if not tablename in tablenames:
                        tablenames.append(tablename)
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join([self.expand(f) for f in fields])
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = attributes.get('left', False)
        distinct = attributes.get('distinct', False)
        groupby = attributes.get('groupby', False)
        orderby = attributes.get('orderby', False)
        having = attributes.get('having', False)
        limitby = attributes.get('limitby', False)
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join if not isinstance(t,Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames if not t in important_tablenames ]
            sql_t = ', '.join([ t for t in excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            #/patch join+left patch
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            # a deterministic order is required for stable LIMIT/OFFSET pages
            if not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
            # oracle does not support limitby
        return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
1055
1056 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1057 if limitby: 1058 (lmin, lmax) = limitby 1059 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1060 return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
1061
1062 - def select(self,query,fields,attributes):
1063 """ 1064 Always returns a Rows object, even if it may be empty 1065 """ 1066 def response(sql): 1067 self.execute(sql) 1068 return self.cursor.fetchall()
1069 sql = self._select(query,fields,attributes) 1070 if attributes.get('cache', None): 1071 (cache_model, time_expire) = attributes['cache'] 1072 del attributes['cache'] 1073 key = self.uri + '/' + sql 1074 key = (key<=200) and key or hashlib.md5(key).hexdigest() 1075 rows = cache_model(key, lambda: response(sql), time_expire) 1076 else: 1077 rows = response(sql) 1078 if isinstance(rows,tuple): 1079 rows = list(rows) 1080 limitby = attributes.get('limitby',None) or (0,) 1081 rows = self.rowslice(rows,limitby[0],None) 1082 return self.parse(rows,self._colnames) 1083
1084 - def _count(self,query,distinct=None):
1085 tablenames = self.tables(query) 1086 if query: 1087 sql_w = ' WHERE ' + self.expand(query) 1088 else: 1089 sql_w = '' 1090 sql_t = ','.join(tablenames) 1091 if distinct: 1092 if isinstance(distinct,(list,tuple)): 1093 distinct = xorify(distinct) 1094 sql_d = self.expand(distinct) 1095 return 'SELECT count(DISTINCT %s) FROM %s%s' % (sql_d, sql_t, sql_w) 1096 return 'SELECT count(*) FROM %s%s' % (sql_t, sql_w)
1097
1098 - def count(self,query,distinct=None):
1099 self.execute(self._count(query,distinct)) 1100 return self.cursor.fetchone()[0]
1101 1102
1103 - def tables(self,query):
1104 tables = set() 1105 if isinstance(query, Field): 1106 tables.add(query.tablename) 1107 elif isinstance(query, (Expression, Query)): 1108 if query.first!=None: 1109 tables = tables.union(self.tables(query.first)) 1110 if query.second!=None: 1111 tables = tables.union(self.tables(query.second)) 1112 return list(tables)
1113
    def commit(self):
        """Commit the current transaction on the underlying DB-API connection."""
        return self.connection.commit()
1116
    def rollback(self):
        """Roll back the current transaction on the underlying DB-API connection."""
        return self.connection.rollback()
1119
    def close(self):
        """Close the underlying DB-API connection."""
        return self.connection.close()
1122
    def distributed_transaction_begin(self,key):
        """No-op by default; adapters that support two-phase commit
        (e.g. MySQL) override this to start the distributed transaction."""
        return
1125
    def prepare(self,key):
        # First phase of two-phase commit.  NOTE(review): *key* is ignored
        # here; adapters needing a keyed prepare (e.g. Postgres) override
        # this method.
        self.connection.prepare()
1128
    def commit_prepared(self,key):
        """Second phase of a distributed transaction: commit the prepared work."""
        self.connection.commit()
1131
    def rollback_prepared(self,key):
        """Second phase of a distributed transaction: abandon the prepared work."""
        self.connection.rollback()
1134
    def concat_add(self,table):
        # Separator inserted between consecutive ADD column clauses when
        # building a single ALTER TABLE during migration; MySQL overrides
        # this to issue one ALTER TABLE statement per column instead.
        return ', ADD '
1137
1138 - def constraint_name(self, table, fieldname):
1139 return '%s_%s__constraint' % (table,fieldname)
1140
    def create_sequence_and_triggers(self, query, table, **args):
        # Most engines auto-generate ids, so just run the CREATE TABLE;
        # sequence/trigger-based adapters (e.g. Oracle) override this.
        self.execute(query)
1143
1144 - def log_execute(self,*a,**b):
1145 self.db._lastsql = a[0] 1146 t0 = time.time() 1147 ret = self.cursor.execute(*a,**b) 1148 self.db._timings.append((a[0],time.time()-t0)) 1149 return ret
1150
    def execute(self,*a,**b):
        """Default execute: delegate to log_execute so SQL text and
        timings are always recorded; adapters override this when the
        statement needs preprocessing first."""
        return self.log_execute(*a, **b)
1153
    def represent(self, obj, fieldtype):
        """Convert a Python value into an SQL literal for *fieldtype*.

        Dispatch order: callables are called first; SQLCustomType uses its
        own encoder; Expression/Field objects are inlined as SQL text;
        list: values are bar-encoded; None and (for non-string types) ''
        become NULL; then the adapter's represent_exceptions() hook gets a
        chance before the generic per-type handling below.  String-ish
        results are returned single-quoted with embedded quotes doubled.
        """
        if isinstance(obj,CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if isinstance(obj, (list, tuple)):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        # empty string maps to NULL except for string/text/password/upload
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
            return 'NULL'
        r = self.represent_exceptions(obj,fieldtype)
        if r != None:
            return r
        if fieldtype == 'boolean':
            # anything not starting with 'F' or '0' counts as true
            if obj and not str(obj)[:1].upper() in ['F', '0']:
                return "'T'"
            else:
                return "'F'"
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(int(obj))
        if fieldtype.startswith('decimal'):
            return str(obj)
        elif fieldtype.startswith('reference'): # reference
            if fieldtype.find('.')>0:
                # keyed reference ('reference table.field'): keep raw repr
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(int(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            # NOTE(review): [:10] on time.isoformat() ('HH:MM:SS.ffffff')
            # keeps 'HH:MM:SS.f'; [:8] looks like the intent -- verify.
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        if not isinstance(obj,str):
            obj = str(obj)
        try:
            # make sure the byte string is valid in the target codec ...
            obj.decode(self.db_codec)
        except:
            # ... otherwise assume latin1 input and transcode
            obj = obj.decode('latin1').encode(self.db_codec)
        return "'%s'" % obj.replace("'", "''")
1220
    def represent_exceptions(self, obj, fieldtype):
        """Hook for adapter-specific literal representations; returning
        None means 'no special case, use the generic represent() path'."""
        return None
1223
    def lastrowid(self,table):
        # Default: unknown; engine-specific adapters override this to ask
        # the backend for the last auto-generated id.
        return None
1226
    def integrity_error_class(self):
        # Default: no driver-specific IntegrityError known here, so return
        # a type that will never match a raised exception (NoneType).
        return type(None)
1229
    def rowslice(self,rows,minimum=0,maximum=None):
        """Return *rows* unchanged; adapters whose engine cannot apply
        LIMIT/OFFSET natively (e.g. MSSQL) override this to slice the
        over-fetched result client-side."""
        return rows
1233
    def parse(self, rows, colnames, blob_decode=True):
        """Turn raw driver rows into a Rows object.

        Each column named 'table.field' is decoded according to the
        field's declared type and grouped into a per-table Row; columns
        that do not look like table.field (expressions, aggregates) are
        stored under row._extra, with an '... AS alias' also exposed as
        an attribute.  'id' columns additionally gain update_record /
        delete_record helpers and a Set per referencing table.
        Virtual fields of the selected tables are applied at the end.
        """
        db = self.db
        virtualtables = []
        new_rows = []
        for (i,row) in enumerate(rows):
            new_row = Row()
            for j,colname in enumerate(colnames):
                value = row[j]
                if not table_field.match(colnames[j]):
                    # not a plain table.field column: stash under _extra
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colnames[j]] = value
                    # NOTE(review): this regex could be compiled once at
                    # module level instead of per column per row
                    select_as_parser = re.compile("\s+AS\s+(\S+)")
                    new_column_name = select_as_parser.search(colnames[j])
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
                    continue
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                field_type = field.type
                # normalize text values through the db codec, then utf-8
                if field.type != 'blob' and isinstance(value, str):
                    try:
                        value = value.decode(db._db_codec)
                    except Exception:
                        pass
                if isinstance(value, unicode):
                    value = value.encode('utf-8')
                if not tablename in new_row:
                    colset = new_row[tablename] = Row()
                    virtualtables.append(tablename)
                else:
                    colset = new_row[tablename]

                # per-type decoding of the raw driver value
                if isinstance(field_type, SQLCustomType):
                    colset[fieldname] = field_type.decoder(value)
                    # field_type = field_type.type
                elif not isinstance(field_type, str) or value is None:
                    colset[fieldname] = value
                elif isinstance(field_type, str) and \
                        field_type.startswith('reference'):
                    referee = field_type[10:].strip()
                    if not '.' in referee:
                        # reference by id: wrap in a lazy Reference
                        colset[fieldname] = rid = Reference(value)
                        (rid._table, rid._record) = (db[referee], None)
                    else: ### reference not by id
                        colset[fieldname] = value
                elif field_type == 'boolean':
                    if value == True or str(value)[:1].lower() == 't':
                        colset[fieldname] = True
                    else:
                        colset[fieldname] = False
                elif field_type == 'date' \
                        and (not isinstance(value, datetime.date)\
                        or isinstance(value, datetime.datetime)):
                    (y, m, d) = [int(x) for x in
                                 str(value)[:10].strip().split('-')]
                    colset[fieldname] = datetime.date(y, m, d)
                elif field_type == 'time' \
                        and not isinstance(value, datetime.time):
                    time_items = [int(x) for x in
                                  str(value)[:8].strip().split(':')[:3]]
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.time(h, mi, s)
                elif field_type == 'datetime'\
                        and not isinstance(value, datetime.datetime):
                    (y, m, d) = [int(x) for x in
                                 str(value)[:10].strip().split('-')]
                    time_items = [int(x) for x in
                                  str(value)[11:19].strip().split(':')[:3]]
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
                elif field_type == 'blob' and blob_decode:
                    colset[fieldname] = base64.b64decode(str(value))
                elif field_type.startswith('decimal'):
                    decimals = [int(x) for x in field_type[8:-1].split(',')][-1]
                    if self.dbengine == 'sqlite':
                        # sqlite stores floats; re-round to declared scale
                        value = ('%.' + str(decimals) + 'f') % value
                    if not isinstance(value, decimal.Decimal):
                        value = decimal.Decimal(str(value))
                    colset[fieldname] = value
                elif field_type.startswith('list:integer'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:reference'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:string'):
                    if not self.dbengine=='google:datastore':
                        colset[fieldname] = bar_decode_string(value)
                    else:
                        colset[fieldname] = value
                else:
                    colset[fieldname] = value
                if field_type == 'id':
                    # attach record-level helpers bound to this id
                    id = colset[field.name]
                    colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
                    colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
                    for (referee_table, referee_name) in \
                            table._referenced_by:
                        s = db[referee_table][referee_name]
                        colset[referee_table] = Set(db, s == id)
                    colset['id'] = id
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
        for tablename in virtualtables:
            for item in db[tablename].virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except KeyError:
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
1358 1359 1360 ################################################################################### 1361 # List of all the available adapters, they all extend BaseAdapter 1362 ################################################################################### 1363
class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases (file-based or in-memory) via sqlite3."""

    def EXTRACT(self,field,what):
        # Delegate date-part extraction to the Python UDF registered in
        # __init__, since SQLite has no native EXTRACT().
        return "web2py_extract('%s',%s)" % (what,self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        # Pull a date/time component out of an ISO 'YYYY-MM-DD HH:MM:SS'
        # string by fixed character positions; None on any failure.
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            (i, j) = table[lookup]
            return int(s[i:j])
        except:
            return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        # NOTE(review): driver_args={} is a shared mutable default and is
        # mutated below (check_same_thread) -- consider a None sentinel.
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1]
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                # relative db paths live inside the application folder
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath)
        if not 'check_same_thread' in driver_args:
            # allow the pooled connection to be reused across threads
            driver_args['check_same_thread'] = False
        def connect(dbpath=dbpath, driver_args=driver_args):
            return sqlite3.Connection(dbpath, **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # register the UDF used by EXTRACT()
        self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)

    def _truncate(self,table,mode = ''):
        # also reset the AUTOINCREMENT counter kept in sqlite_sequence
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self,table):
        # sqlite3 exposes the last inserted rowid directly on the cursor
        return self.cursor.lastrowid
1416 1417
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython, connecting through zxJDBC."""

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1]
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                # relative db paths live inside the application folder
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath)
        def connect(dbpath=dbpath,driver_args=driver_args):
            return zxJDBC.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath),**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # NOTE(review): create_function is a sqlite3-specific API; it is
        # not obvious the zxJDBC connection object provides it -- verify.
        self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)

    def execute(self,a):
        # strip the trailing character (presumably the ';' terminator,
        # which the JDBC driver rejects) before logging/executing
        return self.log_execute(a[:-1])
1444 1445
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL through the pymysql driver.

    Fix over the previous revision: ``commit_prepared`` took its
    argument as ``ley`` (a typo); it is now ``key``, consistent with
    ``prepare``/``rollback_prepared`` and with BaseAdapter.
    """

    driver = globals().get('pymysql',None)
    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        }

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # parameter was misspelled "ley"; it is unused by the XA statement
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    def concat_add(self,table):
        # MySQL migrations chain column ADDs as separate ALTER statements
        return '; ALTER TABLE %s ADD ' % table

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse a mysql:// URI, open a pooled connection and configure
        the session (FK checks on, NO_BACKSLASH_ESCAPES sql_mode)."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = m.group('user')
        if not user:
            raise SyntaxError('User required')
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(dict(db=db,
                                user=credential_decoder(user),
                                passwd=credential_decoder(password),
                                host=host,
                                port=port,
                                charset=charset))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
1542 1543
class PostgreSQLAdapter(BaseAdapter):
    """Adapter for PostgreSQL via psycopg2.

    Fix over the previous revision: the libpq keyword/value connection
    string was assembled without a space between the host='...' and
    port=... items (yielding e.g. "host='x'port=5432", which libpq
    rejects); the separating space is restored.
    """

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def sequence_name(self,table):
        # name of the implicit SERIAL sequence consulted by lastrowid()
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # the id column is SERIAL, so the sequence comes for free;
        # just run the CREATE TABLE statement
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse a postgres:// URI and open a pooled psycopg2 connection
        (UTF8 client encoding, standard_conforming_strings on)."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = m.group('user')
        if not user:
            raise SyntaxError('User required')
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # libpq keyword/value connection string; keep the space between
        # the host and port items
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        def connect(msg=msg,driver_args=driver_args):
            return psycopg2.connect(msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.execute("SET standard_conforming_strings=on;")

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def LIKE(self,first,second):
        # ILIKE keeps matching case-insensitive, like the other engines
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))

    def CONTAINS(self,first,second):
        # NOTE(review): *key* is only bound for string/text/list: field
        # types; any other field type raises NameError below.
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
1655
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting through zxJDBC."""

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = m.group('user')
        if not user:
            raise SyntaxError, 'User required'
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        # JDBC URL plus credentials, passed positionally to zxJDBC.connect
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connect(msg=msg,driver_args=driver_args):
            return zxJDBC.connect(*msg,**driver_args)
        self.pool_connection(connect)
        # NOTE(review): set_client_encoding is a psycopg2 API; verify the
        # zxJDBC connection object actually provides it.
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
1692 1693
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Auto-increment ids are emulated with a per-table sequence plus a
    BEFORE INSERT trigger; CLOB literals are rewritten into numbered
    bind variables in execute().
    """
    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def sequence_name(self,tablename):
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        # the emulation sequence must be dropped along with the table
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Oracle has no LIMIT/OFFSET: emulate with nested ROWNUM selects.
        # NOTE(review): the limitby branch's format string does not start
        # with the SELECT keyword (compare BaseAdapter.select_limitby) --
        # verify against upstream, this looks lost in transcription.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten when needed
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        # blobs become :CLOB(...) placeholders (rewritten to bind variables
        # in execute()); dates/datetimes go through to_date()
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connect(uri=uri,driver_args=driver_args):
            return cx_Oracle.connect(uri,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # make the session's date formats match what represent() emits
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
    # matches the next :CLOB('...') placeholder outside quoted strings
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command):
        # Replace each :CLOB('...') placeholder with a numbered bind
        # variable, collecting the unescaped payloads as arguments; the
        # trailing character (the ';' terminator, presumably rejected by
        # cx_Oracle) is stripped before execution.
        args = []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        return self.log_execute(command[:-1], args)

    def create_sequence_and_triggers(self, query, table, **args):
        # create the table, its id sequence, and a BEFORE INSERT trigger
        # that fills :NEW.id from the sequence
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])
1815 1816
class MSSQLAdapter(BaseAdapter):
    """Adapter for MS SQL Server via pyodbc (DSN or user:pass@host URIs)."""
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def EXTRACT(self,field,what):
        # NOTE(review): T-SQL DATEPART takes (datepart, expression);
        # the "'%s' FROM %s" form here looks suspect -- verify.
        return "DATEPART('%s' FROM %s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # no LIMIT/OFFSET: fetch TOP lmax rows and let rowslice() drop
        # the leading lmin rows client-side
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        # BIT columns want 1/0 instead of the generic 'T'/'F' literals
        if fieldtype == 'boolean':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}, fake_connect=False):
        """Parse a mssql:// URI (plain DSN, or credentials@host/db with
        optional ?arg=value pairs) and open a pooled pyodbc connection.
        With fake_connect=True no connection is made (used for testing)."""
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        uri = uri.split('://')[1]
        if '@' not in uri:
            # no credentials: the whole URI is an ODBC DSN name
            try:
                m = re.compile('^(?P<dsn>.+)$').match(uri)
                if not m:
                    raise SyntaxError, \
                        'Parsing uri string(%s) has no result' % self.uri
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError, 'DSN required'
            except SyntaxError, e:
                logger.error('NdGpatch error')
                raise e
            cnxn = 'DSN=%s' % dsn
        else:
            m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
            if not m:
                raise SyntaxError, \
                    "Invalid URI string in DAL: %s" % uri
            user = m.group('user')
            if not user:
                raise SyntaxError, 'User required'
            password = m.group('password')
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError, 'Host name required'
            db = m.group('db')
            if not db:
                raise SyntaxError, 'Database name required'
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
            for argmatch in argpattern.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connect(cnxn=cnxn,driver_args=driver_args):
            return pyodbc.connect(cnxn,**driver_args)
        if not fake_connect:
            self.pool_connection(connect)
            self.cursor = self.connection.cursor()

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self,rows,minimum=0,maximum=None):
        # discard the offset rows that TOP-based select_limitby over-fetched
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
1943 1944
class MSSQL2Adapter(MSSQLAdapter):
    """MS SQL adapter storing strings in Unicode columns (NVARCHAR/NTEXT);
    string literals are emitted with the N prefix."""

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        }

    def represent(self, obj, fieldtype):
        """Serialize ``obj`` to SQL, prefixing quoted string/text
        literals with N so they are treated as Unicode.

        Bug fix: the guard used to read
            fieldtype == 'string' or fieldtype == 'text' and value[:1] == "'"
        which Python parses as ``string OR (text AND quoted)`` because
        ``and`` binds tighter than ``or`` -- so unquoted 'string' values
        (e.g. NULL) were wrongly N-prefixed.  The intended condition is
        "(string or text) and the literal starts with a quote".
        """
        value = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string', 'text') and value[:1] == "'":
            value = 'N' + value
        return value

    def execute(self, a):
        # Log and run the command, encoding it as utf8 for the driver.
        return self.log_execute(a, 'utf8')
1976 1977
class FireBirdAdapter(BaseAdapter):
    """DAL adapter for Firebird (kinterbasdb driver).

    ids are produced by a per-table generator plus a BEFORE INSERT
    trigger, both created in create_sequence_and_triggers().
    """

    # Flags consulted by the migration/transaction machinery.
    commit_on_alter_table = False
    support_distributed_transaction = True

    # DAL field type -> Firebird column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def sequence_name(self,tablename):
        # Name of the generator that supplies ids for `tablename`.
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        # Name of the BEFORE INSERT trigger that fills in missing ids.
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        # SQL expression yielding a random value (for random ordering).
        return 'RAND()'

    def NOT_NULL(self,default,field_type):
        # Firebird requires DEFAULT before NOT NULL in column DDL.
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        # Firebird uses the SQL-standard FROM ... FOR ... form.
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self,table,mode):
        # Drop the table together with its id generator.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird expresses LIMIT/OFFSET as FIRST <n> SKIP <m>,
        # placed right after SELECT.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # No TRUNCATE in Firebird: delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse a firebird://user:pass@host:port/db?set_encoding=...
        URI and open (or reuse from the pool) a kinterbasdb connection.

        Defaults: port 3050, charset UTF8, empty password.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # keep only the part after the scheme
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL: %s" % uri
        user = m.group('user')
        if not user:
            raise SyntaxError, 'User required'
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        charset = m.group('charset') or 'UTF8'
        # NOTE(review): driver_args defaults to a shared mutable dict and
        # is mutated in place here, so values can leak between
        # instantiations that rely on the default -- confirm intended.
        driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
                                user = credential_decoder(user),
                                password = credential_decoder(password),
                                charset = charset))
        def connect(driver_args=driver_args):
            return kinterbasdb.connect(**driver_args)
        self.pool_connection(connect)

        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus the generator and BEFORE INSERT trigger
        that emulate an auto-increment id column."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        # trigger assigns gen_id(<generator>, 1) when no id was supplied
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        # gen_id(<generator>, 0) reads the generator's current value
        # without incrementing it.
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])
2081 2082
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded (serverless) variant: the URI carries a file
    path instead of host:port/db, and the host is left empty."""

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse firebird_embedded://user:pass@<path>?set_encoding=...
        and open a kinterbasdb connection against the database file."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # keep only the part after the scheme
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = m.group('user')
        if not user:
            raise SyntaxError, 'User required'
        password = m.group('password')
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError, 'Path required'
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # empty host selects the embedded engine
        host = ''
        # NOTE(review): mutates the shared default driver_args dict in
        # place (same pattern as FireBirdAdapter) -- confirm intended.
        driver_args.update(dict(host=host,
                                database=pathdb,
                                user=credential_decoder(user),
                                password=credential_decoder(password),
                                charset=charset))
        def connect(driver_args=driver_args):
            return kinterbasdb.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
2121 2122
class InformixAdapter(BaseAdapter):
    """DAL adapter for IBM Informix (informixdb driver)."""

    # DAL field type -> Informix column type.
    # NOTE(review): several entries ('BLOB SUB_TYPE ...') mirror the
    # Firebird map -- confirm they are valid Informix types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }

    def RANDOM(self):
        # SQL expression yielding a random value.
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        # DEFAULT must precede NOT NULL in Informix column DDL.
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT; limit/offset become FIRST/SKIP SELECT
        modifiers, gated on the server version reported by the driver."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Informix-specific literal rendering for date/datetime values
        (via to_date); returns None for types handled generically."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse informix://user:pass@host[:port]/db and connect via
        informixdb using a db@host DSN.

        NOTE(review): the port is captured by the regex but never used.
        """
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # keep only the part after the scheme
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = m.group('user')
        if not user:
            raise SyntaxError, 'User required'
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(dict(user=user,password=password,autocommit=True))
        def connect(dsn=dsn,driver_args=driver_args):
            return informixdb.connect(dsn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def execute(self,command):
        # Informix rejects a trailing semicolon: strip one if present.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value of the last insert.
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        # Driver exception raised on constraint violations.
        return informixdb.IntegrityError
2227 2228
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2, connected through pyodbc."""

    # DAL field type -> DB2 column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        """SQL keyword sequence for a left join on DB2."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """SQL expression yielding a random value."""
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT; any limit becomes FETCH FIRST n ROWS ONLY."""
        if limitby:
            sql_o += ' FETCH FIRST %i ROWS ONLY' % limitby[1]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """DB2-specific literal rendering for blob and datetime values.

        Returns None for field types the generic representation handles.
        """
        if fieldtype == 'blob':
            # blobs travel base64-encoded inside a BLOB(...) literal
            return "BLOB('%s')" % base64.b64encode(str(obj))
        if fieldtype == 'datetime':
            # DB2 timestamp format: yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                text = obj.isoformat()[:19].replace('T', '-').replace(':', '.')
            elif isinstance(obj, datetime.date):
                text = obj.isoformat()[:10] + '-00.00.00'
            else:
                text = obj
            return "'%s'" % text
        return None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Open a pyodbc connection, treating everything after 'db2://'
        as the raw ODBC connection string."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn, driver_args=driver_args):
            return pyodbc.connect(cnxn, **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def execute(self, command):
        """Strip one trailing ';' (DB2 rejects it) and run the command."""
        if command.endswith(';'):
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        """Fetch the identity value generated by the last insert into
        ``table`` on this connection."""
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        row = self.cursor.fetchone()
        return int(row[0])

    def rowslice(self, rows, minimum=0, maximum=None):
        """Client-side emulation of LIMIT/OFFSET: slice fetched rows."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
2304 2305 2306 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name 2307 # (ANSI-SQL wants this form of name 2308 # to be a delimited identifier) 2309
class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres (ingresdbi driver, local connections only).

    Bug fix: ``__init__`` read ``self._uri``, an attribute that is never
    assigned (the constructor stores the uri as ``self.uri``), so every
    instantiation raised AttributeError; it now reads ``self.uri``.
    """

    # DAL field type -> Ingres column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        # Ingres spells a left join with the OUTER keyword.
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        # SQL expression yielding a random value.
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT; limit becomes a FIRST n SELECT modifier
        and offset an OFFSET clause (Ingres 9.2+)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse an ingres:// URI (only a local database name is
        supported) and open an ingresdbi connection."""
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # BUG FIX: was `self._uri`, which is never assigned anywhere
        # (the uri is stored as self.uri above) and always raised
        # AttributeError.
        connstr = self.uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name = connstr  # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None)  # No tracing
        # NOTE(review): mutable default driver_args is updated in place,
        # so settings leak between instantiations using the default.
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return ingresdbi.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table, its id sequence (when there is no custom
        primary key) and switch storage to a unique btree."""
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # replace the placeholder INGRES_SEQNAME with a real
            # per-table sequence, create it, then create the table
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        """Read the current value of the table's id sequence."""
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        # Driver exception raised on constraint violations.
        return ingresdbi.IntegrityError
2402 2403
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter storing text in Unicode column types
    (NVARCHAR/NCLOB); only the type map differs from IngresAdapter."""

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        }
2426
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB (sapdb driver)."""

    support_distributed_transaction = False

    # DAL field type -> SAP DB column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        }

    def sequence_name(self,table):
        # Sequence that feeds the table's id column.
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT, emulating LIMIT/OFFSET with a nested
        ROWNO subquery.

        NOTE(review): unlike every other adapter in this file the
        limitby template does not begin with the literal 'SELECT', and
        the placeholder/argument pairing looks fragile -- verify
        against a live SAP DB instance.
        """
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the id sequence, wire it up as the id column default,
        then create the table."""
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                     % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Parse sapdb://user:pass@host/db[?sslmode=...] and connect.

        NOTE(review): the port and sslmode groups are captured by the
        regex but never used.
        """
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # keep only the part after the scheme
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = m.group('user')
        if not user:
            raise SyntaxError, 'User required'
        password = m.group('password')
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        def connect(user=user,password=password,database=db,host=host,driver_args=driver_args):
            return sapdb.Connection(user,password,database,host,**driver_args)
        self.pool_connection(connect)
        # self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()

    def lastrowid(self,table):
        # Read the next value of the id sequence.
        # NOTE(review): NEXTVAL increments the sequence, so this returns
        # the id only if the column default and this call stay in sync;
        # confirm against other adapters which read the current value.
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
2504 2505 2506 2507 ######## GAE MySQL ########## 2508
class DatabaseStoredFile:
    """File-like object whose contents live in the web2py_filesystem
    table (MySQL only).

    Used to keep migration .table metadata when no writable local
    filesystem is available.  Reads load the whole content into memory;
    writes are buffered and persisted on close().
    """

    # class-level flag: True once the backing table is known to exist
    web2py_filesystem = False

    def __init__(self,db,filename,mode):
        """Open `filename` in `mode` ('r', 'w', 'rw' or 'a') against the
        given DAL connection; creates the backing table on first use."""
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError, "only MySQL can store metadata .table files in database for now"
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            # remember on the class so the DDL runs at most once per process
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0           # current read position into self.data
        self.data = ''     # in-memory buffer of the file contents
        if mode in ('r','rw','a'):
            # NOTE(review): filename/content are interpolated into SQL
            # unescaped throughout this class; safe only while filenames
            # are generated internally -- confirm no user input reaches here.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                # fall back to a real file on disk if one exists
                self.data = open(filename,'r').read()
            elif mode in ('r','rw'):
                raise RuntimeError, "File %s does not exist" % filename

    def read(self, bytes):
        """Return up to `bytes` characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Return the next line including its trailing newline (or the
        remaining tail when there is no further newline)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # Writes accumulate in memory until close().
        self.data += data

    def close(self):
        """Persist the buffer: replace any previous row for this path
        and commit."""
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
        # single quotes in the content are doubled for SQL
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" % \
            (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()

    @staticmethod
    def exists(db,filename):
        """True if `filename` exists on disk or in web2py_filesystem."""
        if os.path.exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
2565 2566
class UseDatabaseStoredFile:
    """Mixin overriding an adapter's migration-file API so .table
    metadata is kept in the web2py_filesystem table instead of on disk."""

    def file_exists(self, filename):
        """True if ``filename`` exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Return a database-backed file object (``lock`` is ignored)."""
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj, unlock=True):
        """Flush the file object back to the database."""
        fileobj.close()

    def file_delete(self, filename):
        """Remove ``filename``'s row from web2py_filesystem and commit."""
        sql = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(sql)
        self.db.commit()
2582
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """MySQL adapter for Google Cloud SQL (rdbms driver); migration
    metadata is stored in the database via UseDatabaseStoredFile."""

    def __init__(self, db, uri='google:sql://realm:domain/database', pool_size=0,
                 folder=None, db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 credential_decoder = lambda x:x, driver_args={}):
        """Parse google:sql://instance/database and connect via rdbms;
        when migrate is on, the database is created/selected here."""

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        # NOTE(review): overrides the folder set above; `thread` is
        # presumably a module-level threading.local carrying the current
        # application folder -- confirm it is defined at this point.
        self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1]

        m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):])
        if not m:
            # NOTE(review): self._uri is never assigned (the uri is
            # stored as self.uri); this raise would itself fail with
            # AttributeError -- likely should read self.uri.
            raise SyntaxError, "Invalid URI string in SQLDB: %s" % self._uri
        instance = credential_decoder(m.group('instance'))
        db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        # only bind the database up front when we will not create it below
        if not migrate:
            driver_args['database'] = db
        def connect(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        if migrate:
            # self.execute('DROP DATABASE %s' % db)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
            self.execute('USE %s' % db)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
2616
2617 -class NoSQLAdapter(BaseAdapter):
2618
2619 - def represent(self, obj, fieldtype):
2620 if isinstance(obj,CALLABLETYPES): 2621 obj = obj() 2622 if isinstance(fieldtype, SQLCustomType): 2623 return fieldtype.encoder(obj) 2624 if isinstance(obj, (Expression, Field)): 2625 raise SyntaxError, "non supported on GAE" 2626 if self.dbengine=='google:datastore' in globals(): 2627 if isinstance(fieldtype, gae.Property): 2628 return obj 2629 if fieldtype.startswith('list:'): 2630 if not obj: 2631 obj = [] 2632 if not isinstance(obj, (list, tuple)): 2633 obj = [obj] 2634 if obj == '' and not fieldtype[:2] in ['st','te','pa','up']: 2635 return None 2636 if obj != None: 2637 if isinstance(obj, list) and not fieldtype.startswith('list'): 2638 obj = [self.represent(o, fieldtype) for o in obj] 2639 elif fieldtype in ('integer','id'): 2640 obj = long(obj) 2641 elif fieldtype == 'double': 2642 obj = float(obj) 2643 elif fieldtype.startswith('reference'): 2644 if isinstance(obj, (Row, Reference)): 2645 obj = obj['id'] 2646 obj = long(obj) 2647 elif fieldtype == 'boolean': 2648 if obj and not str(obj)[0].upper() == 'F': 2649 obj = True 2650 else: 2651 obj = False 2652 elif fieldtype == 'date': 2653 if not isinstance(obj, datetime.date): 2654 (y, m, d) = [int(x) for x in str(obj).strip().split('-')] 2655 obj = datetime.date(y, m, d) 2656 elif isinstance(obj,datetime.datetime): 2657 (y, m, d) = (obj.year, obj.month, obj.day) 2658 obj = datetime.date(y, m, d) 2659 elif fieldtype == 'time': 2660 if not isinstance(obj, datetime.time): 2661 time_items = [int(x) for x in str(obj).strip().split(':')[:3]] 2662 if len(time_items) == 3: 2663 (h, mi, s) = time_items 2664 else: 2665 (h, mi, s) = time_items + [0] 2666 obj = datetime.time(h, mi, s) 2667 elif fieldtype == 'datetime': 2668 if not isinstance(obj, datetime.datetime): 2669 (y, m, d) = [int(x) for x in str(obj)[:10].strip().split('-')] 2670 time_items = [int(x) for x in str(obj)[11:].strip().split(':')[:3]] 2671 while len(time_items)<3: 2672 time_items.append(0) 2673 (h, mi, s) = time_items 2674 obj = 
datetime.datetime(y, m, d, h, mi, s) 2675 elif fieldtype == 'blob': 2676 pass 2677 elif fieldtype.startswith('list:string'): 2678 if obj!=None and not isinstance(obj,(list,tuple)): 2679 obj=[obj] 2680 return [str(x) for x in obj] 2681 elif fieldtype.startswith('list:'): 2682 if obj!=None and not isinstance(obj,(list,tuple)): 2683 obj=[obj] 2684 return [int(x) for x in obj] 2685 elif isinstance(obj, str): 2686 obj = obj.decode('utf8') 2687 elif not isinstance(obj, unicode): 2688 obj = unicode(obj) 2689 return obj
2690
2691 - def _insert(self,table,fields):
2692 return 'insert %s in %s' % (fields, table)
2693
2694 - def _count(self,query,distinct=None):
2695 return 'count %s' % repr(query)
2696
2697 - def _select(self,query,fields,attributes):
2698 return 'select %s where %s' % (repr(fields), repr(query))
2699
2700 - def _delete(self,tablename, query):
2701 return 'delete %s where %s' % (repr(tablename),repr(query))
2702
2703 - def _update(self,tablename,query,fields):
2704 return 'update %s (%s) where %s' % (repr(tablename), 2705 repr(fields),repr(query))
2706
2707 - def commit(self):
2708 """ 2709 remember: no transactions on many NoSQL 2710 """ 2711 pass
2712
2713 - def rollback(self):
2714 """ 2715 remember: no transactions on many NoSQL 2716 """ 2717 pass
2718
2719 - def close(self):
2720 """ 2721 remember: no transactions on many NoSQL 2722 """ 2723 pass
2724 2725 2726 # these functions should never be called!
2727 - def OR(self,first,second): raise SyntaxError, "Not supported"
2728 - def AND(self,first,second): raise SyntaxError, "Not supported"
2729 - def AS(self,first,second): raise SyntaxError, "Not supported"
2730 - def ON(self,first,second): raise SyntaxError, "Not supported"
2731 - def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
2732 - def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
2733 - def ADD(self,first,second): raise SyntaxError, "Not supported"
2734 - def SUB(self,first,second): raise SyntaxError, "Not supported"
2735 - def MUL(self,first,second): raise SyntaxError, "Not supported"
2736 - def DIV(self,first,second): raise SyntaxError, "Not supported"
2737 - def LOWER(self,first): raise SyntaxError, "Not supported"
2738 - def UPPER(self,first): raise SyntaxError, "Not supported"
2739 - def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
2740 - def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
2741 - def LEFT_JOIN(self): raise SyntaxError, "Not supported"
2742 - def RANDOM(self): raise SyntaxError, "Not supported"
2743 - def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
2744 - def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
2745 - def LIKE(self,first,second): raise SyntaxError, "Not supported"
2746 - def drop(self,table,mode): raise SyntaxError, "Not supported"
2747 - def alias(self,table,alias): raise SyntaxError, "Not supported"
2748 - def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
2749 - def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
2750 - def prepare(self,key): raise SyntaxError, "Not supported"
2751 - def commit_prepared(self,key): raise SyntaxError, "Not supported"
2752 - def rollback_prepared(self,key): raise SyntaxError, "Not supported"
2753 - def concat_add(self,table): raise SyntaxError, "Not supported"
2754 - def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
2755 - def create_sequence_and_triggers(self, query, table, **args): pass
2756 - def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
2757 - def execute(self,*a,**b): raise SyntaxError, "Not supported"
2758 - def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
2759 - def lastrowid(self,table): raise SyntaxError, "Not supported"
2760 - def integrity_error_class(self): raise SyntaxError, "Not supported"
2761 - def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"
2762 2763
class GAEF(object):
    """One Google datastore filter: a (field name, operator, value)
    triple plus a flag telling whether it must be applied in python."""

    def __init__(self, name, op, value, apply):
        # filters on 'id' must target the datastore's __key__ pseudo-field
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
2772
class GoogleDatastoreAdapter(NoSQLAdapter):
    # Adapter for the (non-relational) Google App Engine datastore.
    # uploads are stored in the record itself, not on the filesystem
    uploads_in_blob = True
    # mapping DAL field types -> gae property classes; filled lazily in
    # __init__ (presumably because the gae modules only import on GAE)
    types = {}
    # file helpers (presumably used by SQL-style migration bookkeeping)
    # are meaningless on the datastore: make them no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass
2780
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Bind the adapter to *db* and map DAL field types to gae
        property classes.  The uri may carry a datastore namespace:
        'google:datastore://<namespace>'."""
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda: gae.StringProperty(multiline=True)),
                'text': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                # 'id' has no property: the datastore key plays that role
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda: gae.StringListProperty(default=None)),
                'list:integer': (lambda: gae.ListProperty(int,default=None)),
                'list:reference': (lambda: gae.ListProperty(int,default=None)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        # no connection pooling on the datastore
        self.pool_size = 0
        match = re.compile('.*://(?P<namespace>.+)').match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
2812
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build the gae.Model subclass backing *table* and attach it as
        table._tableobj.

        polymodel may be None (plain Model), True (PolyModel root) or a
        parent Table whose _tableobj becomes the base class.
        """
        myfields = {}
        for k in table.fields:
            # fields already declared on the polymodel parent are inherited
            if isinstance(polymodel,Table) and k in polymodel.fields():
                continue
            field = table[k]
            attr = {}
            if isinstance(field.type, SQLCustomType):
                ftype = self.types[field.type.native or field.type.type](**attr)
            elif isinstance(field.type, gae.Property):
                # the user supplied a ready-made gae property
                ftype = field.type
            elif field.type.startswith('id'):
                # the datastore key acts as the id; no explicit property
                continue
            elif field.type.startswith('decimal'):
                precision, scale = field.type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field.type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[10:].strip()
                ftype = self.types[field.type[:9]](table._db[referenced])
            elif field.type.startswith('list:reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field.type[15:].strip()
                ftype = self.types[field.type[:14]](**attr)
            elif field.type.startswith('list:'):
                ftype = self.types[field.type](**attr)
            elif not field.type in self.types\
                 or not self.types[field.type]:
                raise SyntaxError, 'Field: unknown field type: %s' % field.type
            else:
                ftype = self.types[field.type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError, "polymodel must be None, True, a table or a tablename"
        return None
2858
    def expand(self,expression,field_type=None):
        """Reduce a DAL expression to the form the GAE helpers consume:
        Fields expand to their name, Query/Expression nodes are dispatched
        to their op method (which returns GAEF filter lists), and plain
        values are serialized with represent."""
        if isinstance(expression,Field):
            # text/blob properties are not indexed, so they cannot be queried
            if expression.type in ('text','blob'):
                raise SyntaxError, 'AppEngine does not index by: %s' % expression.type
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)
2877 2878 ### TODO from gql.py Expression
2879 - def AND(self,first,second):
2880 a = self.expand(first) 2881 b = self.expand(second) 2882 if b[0].name=='__key__' and a[0].name!='__key__': 2883 return b+a 2884 return a+b
2885
2886 - def EQ(self,first,second=None):
2887 if isinstance(second, Key): 2888 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 2889 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
2890
2891 - def NE(self,first,second=None):
2892 if first.type != 'id': 2893 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 2894 else: 2895 second = Key.from_path(first._tablename, long(second)) 2896 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
2897
2898 - def LT(self,first,second=None):
2899 if first.type != 'id': 2900 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 2901 else: 2902 second = Key.from_path(first._tablename, long(second)) 2903 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
2904
2905 - def LE(self,first,second=None):
2906 if first.type != 'id': 2907 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 2908 else: 2909 second = Key.from_path(first._tablename, long(second)) 2910 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
2911
2912 - def GT(self,first,second=None):
2913 if first.type != 'id' or second==0 or second == '0': 2914 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 2915 else: 2916 second = Key.from_path(first._tablename, long(second)) 2917 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
2918
2919 - def GE(self,first,second=None):
2920 if first.type != 'id': 2921 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 2922 else: 2923 second = Key.from_path(first._tablename, long(second)) 2924 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
2925
    def INVERT(self,first):
        # descending-sort marker understood by Query.order()
        return '-%s' % first.name

    def COMMA(self,first,second):
        # comma-joined expansion, used for multi-column orderby
        return '%s, %s' % (self.expand(first),self.expand(second))
2931
2932 - def BELONGS(self,first,second=None):
2933 if not isinstance(second,(list, tuple)): 2934 raise SyntaxError, "Not supported" 2935 if first.type != 'id': 2936 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 2937 else: 2938 second = [Key.from_path(first._tablename, i) for i in second] 2939 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
2940
2941 - def CONTAINS(self,first,second):
2942 if not first.type.startswith('list:'): 2943 raise SyntaxError, "Not supported" 2944 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]
2945
2946 - def NOT(self,first):
2947 nops = { self.EQ: self.NE, 2948 self.NE: self.EQ, 2949 self.LT: self.GE, 2950 self.GT: self.LE, 2951 self.LE: self.GT, 2952 self.GE: self.LT} 2953 if not isinstance(first,Query): 2954 raise SyntaxError, "Not suported" 2955 nop = nops.get(first.op,None) 2956 if not nop: 2957 raise SyntaxError, "Not suported %s" % first.op.__name__ 2958 first.op = nop 2959 return self.expand(first)
2960
    def truncate(self,table,mode):
        # delete every record; *mode* is accepted for API compatibility
        # with the SQL adapters but ignored here
        self.db(table.id > 0).delete()
2963
2964 - def select_raw(self,query,fields=[],attributes={}):
2965 new_fields = [] 2966 for item in fields: 2967 if isinstance(item,SQLALL): 2968 new_fields += item.table 2969 else: 2970 new_fields.append(item) 2971 fields = new_fields 2972 if query: 2973 tablename = self.get_table(query) 2974 elif fields: 2975 tablename = fields[0].tablename 2976 query = fields[0].table.id>0 2977 else: 2978 raise SyntaxError, "Unable to determine a tablename" 2979 tableobj = self.db[tablename]._tableobj 2980 items = tableobj.all() 2981 filters = self.expand(query) 2982 for filter in filters: 2983 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 2984 continue 2985 elif filter.name=='__key__' and filter.op=='=': 2986 if filter.value==0: 2987 items = [] 2988 elif isinstance(filter.value, Key): 2989 items = tableobj.get(filter.value) 2990 items = (item and [item]) or [] 2991 else: 2992 item = tableobj.get_by_id(filter.value) 2993 items = (item and [item]) or [] 2994 elif isinstance(items,list): # i.e. there is a single record! 2995 items = [i for i in items if filter.apply(getattr(item,filter.name), 2996 filter.value)] 2997 else: 2998 if filter.name=='__key__': items.order('__key__') 2999 items = items.filter('%s %s' % (filter.name,filter.op),filter.value) 3000 if not isinstance(items,list): 3001 if attributes.get('left', None): 3002 raise SyntaxError, 'Set: no left join in appengine' 3003 if attributes.get('groupby', None): 3004 raise SyntaxError, 'Set: no groupby in appengine' 3005 orderby = attributes.get('orderby', False) 3006 if orderby: 3007 ### THIS REALLY NEEDS IMPROVEMENT !!! 
3008 if isinstance(orderby, (list, tuple)): 3009 orderby = xorify(orderby) 3010 if isinstance(orderby,Expression): 3011 orderby = self.expand(orderby) 3012 orders = orderby.split(', ') 3013 for order in orders: 3014 order={'-id':'-__key__','id':'__key__'}.get(order,order) 3015 items = items.order(order) 3016 if attributes.get('limitby', None): 3017 (lmin, lmax) = attributes['limitby'] 3018 (limit, offset) = (lmax - lmin, lmin) 3019 items = items.fetch(limit, offset=offset) 3020 fields = self.db[tablename].fields 3021 return (items, tablename, fields)
3022
3023 - def select(self,query,fields,attributes):
3024 (items, tablename, fields) = self.select_raw(query,fields,attributes) 3025 # self.db['_lastsql'] = self._select(query,fields,attributes) 3026 rows = [ 3027 [t=='id' and int(item.key().id()) or getattr(item, t) for t in fields] 3028 for item in items] 3029 colnames = ['%s.%s' % (tablename, t) for t in fields] 3030 return self.parse(rows, colnames, False)
3031 3032
    def count(self,query,distinct=None):
        """Count matching records; DISTINCT is not available on GAE."""
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        (items, tablename, fields) = self.select_raw(query)
        # self.db['_lastsql'] = self._count(query)
        try:
            # items may be a plain list (direct key lookups)...
            return len(items)
        except TypeError:
            # ...or a lazy datastore Query object
            return items.count(limit=None)
3042
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer support deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            counter = items.count(limit=None)
            # delete in batches of at most 1000 (GAE hard limit, see above)
            leftitems = items.fetch(1000)
            while len(leftitems):
                gae.delete(leftitems)
                leftitems = items.fetch(1000)
        else:
            counter = len(items)
            gae.delete(items)
        return counter
3062
    def update(self,tablename,query,update_fields):
        """Update every matching entity one at a time; returns the number
        of entities written."""
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        logger.info(str(counter))
        return counter
3074
    def insert(self,table,fields):
        """Store a new entity; returns a Reference wrapping the new id."""
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        rid = Reference(tmp.key().id())
        (rid._table, rid._record) = (table, None)
        return rid
3083
3084 - def bulk_insert(self,table,items):
3085 parsed_items = [] 3086 for item in items: 3087 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 3088 parsed_items.append(table._tableobj(**dfields)) 3089 gae.put(parsed_items) 3090 return True
3091 3092 try: 3093 import couchdb 3094 drivers.append('CouchDB') 3095 except ImportError: 3096 logger.debug('no couchdb driver') 3097
def uuid2int(uuid):
    """Fold a hex uuid string into a single integer.

    NOTE(review): non-hex characters (e.g. the dashes in a canonical uuid)
    contribute -1 via str.find, so this is not a plain base-16 parse and
    int2uuid is only its inverse for dash-free input.
    """
    digits = '0123456789abcdef'
    value = 0
    for ch in uuid:
        value = value * 16 + digits.find(ch)
    return value
3102
def int2uuid(n):
    """Render *n* as a lowercase hex string (no padding); inverse of
    uuid2int for dash-free input.  Returns '' for n == 0."""
    digits = '0123456789abcdef'
    out = ''
    while n:
        n, rem = divmod(n, 16)
        out = digits[rem] + out
    return out
3109
class CouchDBAdapter(NoSQLAdapter):
    # Adapter for Apache CouchDB; queries compile to javascript map
    # functions executed as temporary views.
    uploads_in_blob = True
    # python-side types used when parsing stored values per DAL field type
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }
    # file helpers (presumably used by SQL-style migration bookkeeping)
    # are meaningless for couchdb: make them no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass
3134
3135 - def expand(self,expression,field_type=None):
3136 if isinstance(expression,Field): 3137 if expression.type=='id': 3138 return "%s._id" % expression.tablename 3139 return BaseAdapter.expand(self,expression,field_type)
3140
3141 - def AND(self,first,second):
3142 return '(%s && %s)' % (self.expand(first),self.expand(second))
3143
3144 - def OR(self,first,second):
3145 return '(%s || %s)' % (self.expand(first),self.expand(second))
3146
3147 - def EQ(self,first,second):
3148 if second is None: 3149 return '(%s == null)' % self.expand(first) 3150 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
3151
3152 - def NE(self,first,second):
3153 if second is None: 3154 return '(%s != null)' % self.expand(first) 3155 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
3156
3157 - def COMMA(self,first,second):
3158 return '%s + %s' % (self.expand(first),self.expand(second))
3159
    def represent(self, obj, fieldtype):
        """Serialize *obj* for embedding into a javascript map function."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        # repr() of a (utf8-encoded) str doubles as a javascript literal
        return repr(not isinstance(value,unicode) and value or value.encode('utf8'))
3165
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={}):
        """Connect (via the connection pool) to the couchdb server
        addressed by *uri* ('couchdb://host:port')."""
        self.db = db
        self.uri = uri
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme and talk plain http
        url='http://'+uri[10:]
        def connect(url=url,driver_args=driver_args):
            return couchdb.Server(url,**driver_args)
        self.pool_connection(connect)
3181
3182 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
3183 if migrate: 3184 try: 3185 self.connection.create(table._tablename) 3186 except: 3187 pass
3188
    def insert(self,table,fields):
        """Save a new document whose _id is a uuid-derived integer;
        returns that integer id."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id
3196
    def _select(self,query,fields,attributes):
        """Build the javascript map function and the result column names.

        Returns (fn, colnames); fn is passed to Database.query as a
        temporary view.  Raises SyntaxError for unsupported attributes.
        """
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        # reject any select attribute this adapter cannot honour
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        def uid(fd):
            # map the 'id' pseudo-field onto couchdb's '_id'
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
3226 - def select(self,query,fields,attributes):
3227 if not isinstance(query,Query): 3228 raise SyntaxError, "Not Supported" 3229 fn, colnames = self._select(query,fields,attributes) 3230 tablename = colnames[0].split('.')[0] 3231 ctable = self.connection[tablename] 3232 rows = [cols['value'] for cols in ctable.query(fn)] 3233 return self.parse(rows, colnames, False)
3234
    def delete(self,tablename,query):
        """Delete matching documents; returns the number deleted."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: delete a single document directly by its id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general path: select the matching ids, then delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename].id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)
3255
    def update(self,tablename,query,fields):
        """Update matching documents in place; returns the number updated."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: update a single document fetched by its id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = NoSQLAdapter.represent(self,value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general path: select the matching ids, then rewrite each doc
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename].id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = NoSQLAdapter.represent(self,value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
3282
    def count(self,query,distinct=None):
        """Count matching documents by selecting their ids."""
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename].id],{})
        return len(rows)
3291
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]

    Returns *text* unchanged when valid, raises SyntaxError otherwise.
    """
    # search stops at the first offending character (the original compiled
    # a throwaway pattern and scanned the whole string with findall)
    if re.search(r'[^0-9a-zA-Z_]', text):
        raise SyntaxError(
            'only [0-9a-zA-Z_] allowed in table and field names, received %s'
            % text)
    return text
3302 3303 3304 try: 3305 import pymongo 3306 drivers.append('mongoDB') 3307 except: 3308 logger.debug('no mongoDB driver') 3309
class MongoDBAdapter(NoSQLAdapter):
    # Experimental MongoDB adapter: connection handling only; most query
    # operations are still unimplemented.
    uploads_in_blob = True
    # python-side types used when parsing stored values per DAL field type
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }
3331 - def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', 3332 pool_size=0,folder=None,db_codec ='UTF-8', 3333 credential_decoder=lambda x:x, driver_args={}):
3334 self.db = db 3335 self.uri = uri 3336 self.dbengine = 'mongodb' 3337 self.folder = folder 3338 db['_lastsql'] = '' 3339 self.db_codec = 'UTF-8' 3340 self.pool_size = pool_size 3341 3342 m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self._uri[10:]) 3343 if not m: 3344 raise SyntaxError, "Invalid URI string in DAL: %s" % self._uri 3345 host = m.group('host') 3346 if not host: 3347 raise SyntaxError, 'mongodb: host name required' 3348 dbname = m.group('db') 3349 if not dbname: 3350 raise SyntaxError, 'mongodb: db name required' 3351 port = m.group('port') or 27017 3352 driver_args.update(dict(host=host,port=port)) 3353 def connect(dbname=dbname,driver_args=driver_args): 3354 return pymongo.Connection(**driver_args)[dbname]
3355 self.pool_connection(connect)
3356
3357 - def insert(self,table,fields):
3358 ctable = self.connection[table._tablename] 3359 values = dict((k,self.represent(v,table[k].type)) for k,v in fields) 3360 ctable.insert(values) 3361 return uuid2int(id)
3362 3363
    # --- query operations: not implemented yet ---------------------------

    def count(self,query):
        raise RuntimeError, "Not implemented"

    def select(self,query,fields,attributes):
        raise RuntimeError, "Not implemented"

    def delete(self,tablename, query):
        raise RuntimeError, "Not implemented"

    def update(self,tablename,query,fields):
        raise RuntimeError, "Not implemented"
3375 3376 3377 ######################################################################## 3378 # end of adapters 3379 ######################################################################## 3380 3381 ADAPTERS = { 3382 'sqlite': SQLiteAdapter, 3383 'sqlite:memory': SQLiteAdapter, 3384 'mysql': MySQLAdapter, 3385 'postgres': PostgreSQLAdapter, 3386 'oracle': OracleAdapter, 3387 'mssql': MSSQLAdapter, 3388 'mssql2': MSSQL2Adapter, 3389 'db2': DB2Adapter, 3390 'informix': InformixAdapter, 3391 'firebird': FireBirdAdapter, 3392 'firebird_embedded': FireBirdAdapter, 3393 'ingres': IngresAdapter, 3394 'ingresu': IngresUnicodeAdapter, 3395 'sapdb': SAPDBAdapter, 3396 'jdbc:sqlite': JDBCSQLiteAdapter, 3397 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 3398 'jdbc:postgres': JDBCPostgreSQLAdapter, 3399 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 3400 'google:datastore': GoogleDatastoreAdapter, 3401 'google:sql': GoogleSQLAdapter, 3402 'couchdb': CouchDBAdapter, 3403 'mongodb': CouchDBAdapter, 3404 } 3405 3406
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype
    """
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # format a referenced record for display (used by represent)
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type == 'string':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'text':
        requires.append(validators.IS_LENGTH(2 ** 16))
    elif field_type == 'password':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'double':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif field.db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in field.db.tables:
        referenced = field.db[field_type[10:]]
        def repr_ref(id, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced.id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(field.db,field)
            # self-references must accept empty (chicken-and-egg on insert)
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif field.db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in field.db.tables:
        referenced = field.db[field_type[15:]]
        def list_ref_repr(ids, r=referenced, f=ff):
            if not ids:
                return None
            refs = r._db(r.id.belongs(ids)).select(r.id)
            return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced.id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(field.db,referenced.id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(field.db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(field.db,field))
    # sff: type prefixes (integer/double/date/time/datetime/boolean) whose
    # widgets cannot produce an "empty" value in the usual sense
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape a literal '|' inside a list item by doubling it
    (see bar_encode/bar_decode_string)."""
    return str(item).replace('|', '||')
3501
3502 -def bar_encode(items):
3503 return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
3504
def bar_decode_integer(value):
    """Parse a bar-encoded string of integers back into a list of ints,
    skipping the empty chunks produced by the outer bars."""
    chunks = value.split('|')
    return [int(chunk) for chunk in chunks if chunk.strip()]
3507
def bar_decode_string(value):
    """Inverse of bar_encode for strings: drop the outer bars, split on
    single '|' (the module-level string_unpack regex) and undo the '||'
    escaping; blank chunks are skipped."""
    return [x.replace('||', '|') for x in string_unpack.split(value[1:-1]) if x.strip()]
3510 3511
class Row(dict):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __getitem__(self, key):
        # values in the optional '_extra' sub-dict (computed columns)
        # shadow regular keys
        key = str(key)
        extra = self.get('_extra', {})
        if key in extra:
            return extra[key]
        return dict.__getitem__(self, key)

    def __call__(self, key):
        # row('name') is an alias for row['name']
        return self.__getitem__(key)

    def __setitem__(self, key, value):
        # keys are always stored as plain strings
        dict.__setitem__(self, str(key), value)

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __repr__(self):
        return '<Row %s>' % dict.__repr__(self)

    def __int__(self):
        return dict.__getitem__(self, 'id')

    def __eq__(self, other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False):
        """Return a plain dict containing only serializable values;
        nested Rows recurse, References/Decimals are converted, and
        anything non-serializable is dropped."""
        serializable = (str, unicode, int, long, float, bool, list)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v = d[k]
            if v is None:
                continue
            if isinstance(v, Row):
                d[k] = v.as_dict()
            elif isinstance(v, Reference):
                d[k] = int(v)
            elif isinstance(v, decimal.Decimal):
                d[k] = float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T', ' ')[:19]
            elif not isinstance(v, serializable):
                del d[k]
        return d
3574 3575
def Row_unpickler(data):
    """Recreate a Row from the pickled dict produced by Row_pickler."""
    return Row(cPickle.loads(data))

def Row_pickler(data):
    """Pickle a Row via its serializable as_dict() form."""
    return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)

# register the custom reduce so Row instances pickle as plain dicts
copy_reg.pickle(Row, Row_pickler, Row_unpickler)


################################################################################
# Everything below should be independent on the specifics of the
# database and should for RDBMs and some NoSQL databases
################################################################################
class SQLCallableList(list):
    """A list that can also be called (e.g. db.tables()); calling it
    returns a shallow copy so callers cannot mutate the original."""
    def __call__(self):
        return copy.copy(self)
3593 3594
class DAL(dict):

    """
    an instance of this class represents a database connection

    Example::

       db = DAL('sqlite://test.db')
       db.define_table('tablename', Field('fieldname1'),
                                    Field('fieldname2'))
    """

    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        # delegated to a BaseAdapter class attribute so every adapter sees it
        BaseAdapter.set_folder(folder)
3614 3615 @staticmethod
3616 - def distributed_transaction_begin(*instances):
3617 if not instances: 3618 return 3619 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 3620 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 3621 instances = enumerate(instances) 3622 for (i, db) in instances: 3623 if not db._adapter.support_distributed_transaction(): 3624 raise SyntaxError, \ 3625 'distributed transaction not suported by %s' % db._dbname 3626 for (i, db) in instances: 3627 db._adapter.distributed_transaction_begin(keys[i])
3628 3629 @staticmethod
3630 - def distributed_transaction_commit(*instances):
3631 if not instances: 3632 return 3633 instances = enumerate(instances) 3634 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 3635 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 3636 for (i, db) in instances: 3637 if not db._adapter.support_distributed_transaction(): 3638 raise SyntaxError, \ 3639 'distributed transaction not suported by %s' % db._dbanme 3640 try: 3641 for (i, db) in instances: 3642 db._adapter.prepare(keys[i]) 3643 except: 3644 for (i, db) in instances: 3645 db._adapter.rollback_prepared(keys[i]) 3646 raise RuntimeError, 'failure to commit distributed transaction' 3647 else: 3648 for (i, db) in instances: 3649 db._adapter.commit_prepared(keys[i]) 3650 return
3651
    def __init__(self, uri='sqlite://dummy.db', pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_enabled=False,
                 decode_credentials=False, driver_args=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
               (default: 'sqlite://dummy.db')
        :pool_size: How many open connections to make to the database object.
        :folder: <please update me>
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
             against sql reserved keywords. (Default None)

        * 'common' List of sql keywords that are common to all database types
                such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
                <adaptername> Checks against the specific adapters list of keywords
                (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapters
                list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_enabled (defaults to False). If sets to True fake migrates ALL tables
        """

        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        if uri:
            # a tuple/list of uris provides failover alternatives
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            # retry the whole list up to 5 times, sleeping between rounds
            for k in range(5):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = regex_dbname.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname
                        # notice that driver args or {} else driver_args defaults to {} global, not correct
                        args = (self,uri,pool_size,folder,db_codec,credential_decoder,driver_args or {})
                        self._adapter = ADAPTERS[self._dbname](*args)
                        connected = True
                        break
                    except SyntaxError:
                        # a malformed uri will never connect: fail fast
                        raise
                    except Exception, error:
                        pass
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                raise RuntimeError, "Failure to connect, tried 5 times:\n%s" % error
        else:
            # no uri: dummy adapter, no real connection, migrations off
            args = (self,'None',0,folder,db_codec)
            self._adapter = BaseAdapter(*args)
            migrate = fake_migrate = False
        self.tables = SQLCallableList()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_enabled = fake_migrate_enabled
3734
3735 - def check_reserved_keyword(self, name):
3736 """ 3737 Validates ``name`` against SQL keywords 3738 Uses self.check_reserve which is a list of 3739 operators to use. 3740 self.check_reserved 3741 ['common', 'postgres', 'mysql'] 3742 self.check_reserved 3743 ['all'] 3744 """ 3745 for backend in self.check_reserved: 3746 if name.upper() in self.RSK[backend]: 3747 raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
3748
3749 - def __contains__(self, tablename):
3750 if self.has_key(tablename): 3751 return True 3752 else: 3753 return False
3754
3755 - def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True):
3756 """ 3757 EXAMPLE: 3758 3759 db.define_table('person',Field('name'),Field('info')) 3760 db.define_table('pet',Field('person',db.person),Field('name'),Field('info')) 3761 3762 @request.restful() 3763 def index(): 3764 def GET(*args,**vars): 3765 patterns = [ 3766 "/persons[person]", 3767 "/{person.name.startswith}", 3768 "/{person.name}/:field", 3769 "/{person.name}/pets[pet.person]", 3770 "/{person.name}/pet[pet.person]/{pet.name}", 3771 "/{person.name}/pet[pet.person]/{pet.name}/:field" 3772 ] 3773 parser = db.parse_as_rest(patterns,args,vars) 3774 if parser.status == 200: 3775 return dict(content=parser.response) 3776 else: 3777 raise HTTP(parser.status,parser.error) 3778 def POST(table_name,**vars): 3779 if table_name == 'person': 3780 return db.person.validate_and_insert(**vars) 3781 elif table_name == 'pet': 3782 return db.pet.validate_and_insert(**vars) 3783 else: 3784 raise HTTP(400) 3785 return locals() 3786 """ 3787 3788 db = self 3789 re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 3790 re2 = re.compile('^.+\[.+\]$') 3791 3792 def auto_table(table,base='',depth=0): 3793 patterns = [] 3794 for field in db[table].fields: 3795 if base: 3796 tag = '%s/%s' % (base,field.replace('_','-')) 3797 else: 3798 tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) 3799 f = db[table][field] 3800 if not f.readable: continue 3801 if f.type=='id' or 'slug' in field or f.type.startswith('reference'): 3802 tag += '/{%s.%s}' % (table,field) 3803 patterns.append(tag) 3804 patterns.append(tag+'/:field') 3805 elif f.type.startswith('boolean'): 3806 tag += '/{%s.%s}' % (table,field) 3807 patterns.append(tag) 3808 patterns.append(tag+'/:field') 3809 elif f.type.startswith('double') or f.type.startswith('integer'): 3810 tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) 3811 patterns.append(tag) 3812 patterns.append(tag+'/:field') 3813 elif f.type.startswith('list:'): 3814 tag += 
'/{%s.%s.contains}' % (table,field) 3815 patterns.append(tag) 3816 patterns.append(tag+'/:field') 3817 elif f.type in ('date','datetime'): 3818 tag+= '/{%s.%s.year}' % (table,field) 3819 patterns.append(tag) 3820 patterns.append(tag+'/:field') 3821 tag+='/{%s.%s.month}' % (table,field) 3822 patterns.append(tag) 3823 patterns.append(tag+'/:field') 3824 tag+='/{%s.%s.day}' % (table,field) 3825 patterns.append(tag) 3826 patterns.append(tag+'/:field') 3827 if f.type in ('datetime','time'): 3828 tag+= '/{%s.%s.hour}' % (table,field) 3829 patterns.append(tag) 3830 patterns.append(tag+'/:field') 3831 tag+='/{%s.%s.minute}' % (table,field) 3832 patterns.append(tag) 3833 patterns.append(tag+'/:field') 3834 tag+='/{%s.%s.second}' % (table,field) 3835 patterns.append(tag) 3836 patterns.append(tag+'/:field') 3837 if depth>0: 3838 for rtable,rfield in db[table]._referenced_by: 3839 tag+='/%s[%s.%s]' % (rtable,rtable,rfield) 3840 patterns.append(tag) 3841 patterns += auto_table(rtable,base=tag,depth=depth-1) 3842 return patterns
3843 3844 if patterns=='auto': 3845 patterns=[] 3846 for table in db.tables: 3847 if not table.startswith('auth_'): 3848 patterns += auto_table(table,base='',depth=1) 3849 else: 3850 i = 0 3851 while i<len(patterns): 3852 pattern = patterns[i] 3853 tokens = pattern.split('/') 3854 if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): 3855 new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1])) 3856 patterns = patterns[:i]+new_patterns+patterns[i+1:] 3857 i += len(new_patterns) 3858 else: 3859 i += 1 3860 if '/'.join(args) == 'patterns': 3861 return Row({'status':200,'pattern':'list', 3862 'error':None,'response':patterns}) 3863 for pattern in patterns: 3864 otable=table=None 3865 dbset=db(query) 3866 i=0 3867 tags = pattern[1:].split('/') 3868 # print pattern 3869 if len(tags)!=len(args): 3870 continue 3871 for tag in tags: 3872 # print i, tag, args[i] 3873 if re1.match(tag): 3874 # print 're1:'+tag 3875 tokens = tag[1:-1].split('.') 3876 table, field = tokens[0], tokens[1] 3877 if not otable or table == otable: 3878 if len(tokens)==2 or tokens[2]=='eq': 3879 query = db[table][field]==args[i] 3880 elif tokens[2]=='ne': 3881 query = db[table][field]!=args[i] 3882 elif tokens[2]=='lt': 3883 query = db[table][field]<args[i] 3884 elif tokens[2]=='gt': 3885 query = db[table][field]>args[i] 3886 elif tokens[2]=='ge': 3887 query = db[table][field]>=args[i] 3888 elif tokens[2]=='le': 3889 query = db[table][field]<=args[i] 3890 elif tokens[2]=='year': 3891 query = db[table][field].year()==args[i] 3892 elif tokens[2]=='month': 3893 query = db[table][field].month()==args[i] 3894 elif tokens[2]=='day': 3895 query = db[table][field].day()==args[i] 3896 elif tokens[2]=='hour': 3897 query = db[table][field].hour()==args[i] 3898 elif tokens[2]=='minute': 3899 query = db[table][field].minutes()==args[i] 3900 elif tokens[2]=='second': 3901 query = db[table][field].seconds()==args[i] 3902 elif tokens[2]=='startswith': 3903 query = 
db[table][field].startswith(args[i]) 3904 elif tokens[2]=='contains': 3905 query = db[table][field].contains(args[i]) 3906 else: 3907 raise RuntimeError, "invalid pattern: %s" % pattern 3908 if len(tokens)==4 and tokens[3]=='not': 3909 query = ~query 3910 elif len(tokens)>=4: 3911 raise RuntimeError, "invalid pattern: %s" % pattern 3912 dbset=dbset(query) 3913 else: 3914 raise RuntimeError, "missing relation in pattern: %s" % pattern 3915 elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]: 3916 # print 're2:'+tag 3917 ref = tag[tag.find('[')+1:-1] 3918 if '.' in ref: 3919 table,field = ref.split('.') 3920 # print table,field 3921 if nested_select: 3922 try: 3923 dbset=db(db[table][field].belongs(dbset._select(db[otable]._id))) 3924 except ValueError: 3925 return Row({'status':400,'pattern':pattern, 3926 'error':'invalid path','response':None}) 3927 else: 3928 items = [item.id for item in dbset.select(db[otable]._id)] 3929 dbset=db(db[table][field].belongs(items)) 3930 else: 3931 dbset=dbset(db[ref]) 3932 elif tag==':field' and table: 3933 # # print 're3:'+tag 3934 field = args[i] 3935 if not field in db[table]: break 3936 try: 3937 item = dbset.select(db[table][field],limitby=(0,1)).first() 3938 except ValueError: 3939 return Row({'status':400,'pattern':pattern, 3940 'error':'invalid path','response':None}) 3941 if not item: 3942 return Row({'status':404,'pattern':pattern, 3943 'error':'record not found','response':None}) 3944 else: 3945 return Row({'status':200,'response':item[field], 3946 'pattern':pattern}) 3947 elif tag != args[i]: 3948 break 3949 otable = table 3950 i += 1 3951 if i==len(tags) and table: 3952 otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1) 3953 try: 3954 if otable[:1]=='~': orderby = ~db[otable[1:]][ofield] 3955 else: orderby = db[otable][ofield] 3956 except KeyError: 3957 return Row({'status':400,'error':'invalid orderby','response':None}) 3958 fields = [field for field in db[table] if field.readable] 3959 
count = dbset.count() 3960 try: 3961 limits = (int(vars.get('min',0)),int(vars.get('max',1000))) 3962 if limits[0]<0 or limits[1]<limits[0]: raise ValueError 3963 except ValueError: 3964 Row({'status':400,'error':'invalid limits','response':None}) 3965 if count > limits[1]-limits[0]: 3966 Row({'status':400,'error':'too many records','response':None}) 3967 try: 3968 response = dbset.select(limitby=limits,orderby=orderby,*fields) 3969 except ValueError: 3970 return Row({'status':400,'pattern':pattern, 3971 'error':'invalid path','response':None}) 3972 return Row({'status':200,'response':response,'pattern':pattern}) 3973 return Row({'status':400,'error':'no mathcing pattern','response':None})
3974 3975
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Defines a Table on this DAL from the given Field objects and
        returns it. The table also becomes reachable as ``db[tablename]``
        and ``db.tablename``.

        Recognized keyword args: migrate, fake_migrate, primarykey,
        format, trigger_name, sequence_name, polymodel. Migration flags
        combine with the DAL-level defaults set in the constructor.

        :raises SyntaxError: on unknown kwargs, invalid/duplicate/reserved
            table names.
        """
        # reject unknown keyword options up front
        for key in args:
            if key not in [
                'migrate',
                'primarykey',
                'fake_migrate',
                'format',
                'trigger_name',
                'sequence_name',
                'polymodel']:
                raise SyntaxError, 'invalid table "%s" attribute: %s' % (tablename, key)
        # migrate_enabled=False vetoes any migration; fake_migrate_enabled=True
        # forces fake migration regardless of the per-table flag
        migrate = self._migrate_enabled and args.get('migrate',self._migrate)
        fake_migrate = self._fake_migrate_enabled or args.get('fake_migrate',self._fake_migrate)
        format = args.get('format',None)
        trigger_name = args.get('trigger_name', None)
        sequence_name = args.get('sequence_name', None)
        primarykey=args.get('primarykey',None)
        polymodel=args.get('polymodel',None)
        if not isinstance(tablename,str):
            raise SyntaxError, "missing table name"
        tablename = cleanup(tablename)
        lowertablename = tablename.lower()

        # names must not clash with DAL attributes or python keywords
        if tablename.startswith('_') or hasattr(self,lowertablename) or \
                regex_python_keywords.match(tablename):
            raise SyntaxError, 'invalid table name: %s' % tablename
        elif lowertablename in self.tables:
            raise SyntaxError, 'table already defined: %s' % tablename
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)

        t = self[tablename] = Table(self, tablename, *fields,
                                    **dict(primarykey=primarykey,
                                           trigger_name=trigger_name,
                                           sequence_name=sequence_name))
        # db magic: with no uri there is no backend to migrate against
        if self._uri in (None,'None'):
            return t

        t._create_references()

        if migrate or self._adapter.dbengine=='google:datastore':
            # serialize DDL across threads via the module-level lock
            try:
                sql_locker.acquire()
                self._adapter.create_table(t,migrate=migrate,
                                           fake_migrate=fake_migrate,
                                           polymodel=polymodel)
            finally:
                sql_locker.release()
        else:
            t._dbt = None
        self.tables.append(tablename)
        t._format = format
        return t
4036
4037 - def __iter__(self):
4038 for tablename in self.tables: 4039 yield self[tablename]
4040
4041 - def __getitem__(self, key):
4042 return dict.__getitem__(self, str(key))
4043
4044 - def __setitem__(self, key, value):
4045 dict.__setitem__(self, str(key), value)
4046
4047 - def __getattr__(self, key):
4048 return self[key]
4049
4050 - def __setattr__(self, key, value):
4051 if key[:1]!='_' and key in self: 4052 raise SyntaxError, \ 4053 'Object %s exists and cannot be redefined' % key 4054 self[key] = value
4055
4056 - def __repr__(self):
4057 return '<DAL ' + dict.__repr__(self) + '>'
4058
4059 - def __call__(self, query=None):
4060 if isinstance(query,Table): 4061 query = query._id>0 4062 elif isinstance(query,Field): 4063 query = query!=None 4064 return Set(self, query)
4065
    def commit(self):
        """Commit the current transaction on the underlying adapter."""
        self._adapter.commit()
4068
    def rollback(self):
        """Roll back the current transaction on the underlying adapter."""
        self._adapter.rollback()
4071
4072 - def executesql(self, query, placeholders=None, as_dict=False):
4073 """ 4074 placeholders is optional and will always be None when using DAL 4075 if using raw SQL with placeholders, placeholders may be 4076 a sequence of values to be substituted in 4077 or, *if supported by the DB driver*, a dictionary with keys 4078 matching named placeholders in your SQL. 4079 4080 Added 2009-12-05 "as_dict" optional argument. Will always be 4081 None when using DAL. If using raw SQL can be set to True 4082 and the results cursor returned by the DB driver will be 4083 converted to a sequence of dictionaries keyed with the db 4084 field names. Tested with SQLite but should work with any database 4085 since the cursor.description used to get field names is part of the 4086 Python dbi 2.0 specs. Results returned with as_dict = True are 4087 the same as those returned when applying .to_list() to a DAL query. 4088 4089 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 4090 4091 --bmeredyk 4092 """ 4093 if placeholders: 4094 self._adapter.execute(query, placeholders) 4095 else: 4096 self._adapter.execute(query) 4097 if as_dict: 4098 if not hasattr(self._adapter.cursor,'description'): 4099 raise RuntimeError, "database does not support executesql(...,as_dict=True)" 4100 # Non-DAL legacy db query, converts cursor results to dict. 4101 # sequence of 7-item sequences. each sequence tells about a column. 4102 # first item is always the field name according to Python Database API specs 4103 columns = self._adapter.cursor.description 4104 # reduce the column info down to just the field names 4105 fields = [f[0] for f in columns] 4106 # will hold our finished resultset in a list 4107 data = self._adapter.cursor.fetchall() 4108 # convert the list for each row into a dictionary so it's 4109 # easier to work with. row['field_name'] rather than row[0] 4110 return [dict(zip(fields,row)) for row in data] 4111 # see if any results returned from database 4112 try: 4113 return self._adapter.cursor.fetchall() 4114 except: 4115 return None
4116
4117 - def _update_referenced_by(self, other):
4118 for tablename in self.tables: 4119 by = self[tablename]._referenced_by 4120 by[:] = [item for item in by if not item[0] == other]
4121
4122 - def export_to_csv_file(self, ofile, *args, **kwargs):
4123 for table in self.tables: 4124 ofile.write('TABLE %s\r\n' % table) 4125 self(self[table]._id > 0).select().export_to_csv_file(ofile, *args, **kwargs) 4126 ofile.write('\r\n\r\n') 4127 ofile.write('END')
4128
4129 - def import_from_csv_file(self, ifile, id_map={}, null='<NULL>', 4130 unique='uuid', *args, **kwargs):
4131 for line in ifile: 4132 line = line.strip() 4133 if not line: 4134 continue 4135 elif line == 'END': 4136 return 4137 elif not line.startswith('TABLE ') or not line[6:] in self.tables: 4138 raise SyntaxError, 'invalid file format' 4139 else: 4140 tablename = line[6:] 4141 self[tablename].import_from_csv_file(ifile, id_map, null, 4142 unique, *args, **kwargs)
4143 4144
class SQLALL(object):
    """
    Wraps a Table so that str() of the wrapper expands to the
    comma-separated list of all its field names (each prefixed by the
    table name and '.').

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        # keep a reference to the wrapped table; expansion happens lazily
        # in __str__
        self.table = table

    def __str__(self):
        names = [str(field) for field in self.table]
        return ', '.join(names)
4158 4159
class Reference(int):
    """
    Lazy reference to a record: it IS the referenced integer id (int
    subclass), and fetches the full record on first non-'id'
    attribute/item access.

    NOTE(review): the select machinery is expected to set ``_table`` and
    ``_record`` on the instance before other attributes are touched;
    accessing them earlier would recurse through __getattr__ -- verify
    against the callers.
    """

    def __allocate(self):
        # fetch the referenced record on demand, at most once
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError, "Using a recursive select but encountered a broken reference"

    def __getattr__(self, key):
        # 'id' is the int value itself; everything else comes from the
        # lazily-fetched record (missing keys yield None)
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setattr__(self, key, value):
        # underscore names are internal bookkeeping: store on the instance
        if key.startswith('_'):
            int.__setattr__(self, key, value)
            return
        # public names write through to the referenced record
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        # item access mirrors attribute access
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
4190 4191
def Reference_unpickler(data):
    """Inverse of Reference_pickler: decode the marshal payload back to an int."""
    return marshal.loads(data)
4194
def Reference_pickler(data):
    """
    Pickle support: reduce a Reference to (Reference_unpickler,
    (marshalled int,)) so only the integer id is serialized.
    """
    value = int(data)
    try:
        payload = marshal.dumps(value)
    except AttributeError:
        # marshal.dumps missing (presumably a restricted runtime -- TODO
        # confirm): build the py2 marshal int encoding by hand
        payload = 'i%s' % struct.pack('<i', value)
    return (Reference_unpickler, (payload,))
4201 4202 copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler) 4203 4204
class Table(dict):

    """
    an instance of this class represents a database table

    Example::

        db = DAL(...)
        db.define_table('users', Field('name'))
        db.users.insert(name='me') # print db.users._insert(...) to see SQL
        db.users.drop()

    Fields are stored as dict entries keyed by field name; bookkeeping
    attributes all carry a leading underscore.
    """

    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._tablename = tablename
        # adapter-specific names for the backing sequence/trigger;
        # explicit kwargs win over the adapter defaults
        self._sequence_name = args.get('sequence_name',None) or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name',None) or \
            db and db._adapter.trigger_name(tablename)

        primarykey = args.get('primarykey', None)
        if primarykey and not isinstance(primarykey,list):
            raise SyntaxError, "primarykey must be a list of fields from table '%s'" \
                % tablename
        elif primarykey:
            # keyed table: no automatic 'id' field is added
            self._primarykey = primarykey
            new_fields = []
        else:
            new_fields = [ Field('id', 'id') ]
        for field in fields:
            if isinstance(field, Field):
                # copy fields already bound to another table so this table
                # gets its own instance
                if hasattr(field, '_db'):
                    field = copy.copy(field)
                if field.type == 'id':
                    # Keep this alias for the primary key.
                    new_fields[0] = field
                else:
                    new_fields.append(field)
            elif isinstance(field, Table):
                # inherit all fields (except 'id') from another table
                new_fields += [copy.copy(field[f]) for f in
                               field.fields if field[f].type!='id']
            else:
                raise SyntaxError, \
                    'define_table argument is not a Field or Table: %s' % field
        fields = new_fields
        self._db = db
        self._id = fields[0]
        tablename = tablename  # NOTE(review): no-op assignment, looks like a leftover
        self.fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        # adapters that keep uploads inside the db get an auto-generated
        # companion blob field per 'upload' field
        if db and self._db._adapter.uploads_in_blob==True:
            for field in fields:
                if isinstance(field, Field) and field.type == 'upload'\
                        and field.uploadfield is True:
                    tmp = field.uploadfield = '%s_blob' % field.name
                    fields.append(self._db.Field(tmp, 'blob', default=''))

        lower_fieldnames = set()
        for field in fields:
            if db and db.check_reserved:
                db.check_reserved_keyword(field.name)

            # field names must be unique case-insensitively
            if field.name.lower() in lower_fieldnames:
                raise SyntaxError, "duplicate field %s in table %s" % (field.name, tablename)
            else:
                lower_fieldnames.add(field.name.lower())

            self.fields.append(field.name)
            self[field.name] = field
            if field.type == 'id':
                # the primary key is also reachable as table['id']
                self['id'] = field
            # back-references from the field to its table and db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = self._db
            # clamp string lengths to the backend's maximum
            field.length = min(field.length,self._db and self._db._adapter.maxcharlength or INFINITY)
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)
        self.ALL = SQLALL(self)

        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    # NOTE(review): message below is missing a closing quote
                    raise SyntaxError, \
                        "primarykey must be a list of fields from table '%s " % tablename
                else:
                    self[k].notnull = True

    def _validate(self,**vars):
        # run each provided value through its field's validators; returns a
        # Row mapping fieldname -> error for the values that failed
        errors = Row()
        for key,value in vars.items():
            value,error = self[key].validate(value)
            if error:
                errors[key] = error
        return errors

    def _create_references(self):
        # resolve 'reference <table>[.<field>]' field types and record the
        # back-link on the referenced table's _referenced_by list
        self._referenced_by = []
        for fieldname in self.fields:
            field=self[fieldname]
            if isinstance(field.type,str) and field.type[:10] == 'reference ':
                ref = field.type[10:].strip()
                if not ref.split():
                    raise SyntaxError, 'Table: reference to nothing: %s' %ref
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in self._db:
                    raise SyntaxError, "Table: table '%s' does not exist" % rtablename
                rtable = self._db[rtablename]
                if self._tablename in rtable.fields:
                    raise SyntaxError, \
                        'Field: table %s has same name as a field in referenced table %s' \
                        % (self._tablename, rtablename)
                elif len(refs)==2:
                    # reference to a specific field: only allowed between
                    # keyed tables
                    rfieldname = refs[1]
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError,\
                            'keyed tables can only reference other keyed tables (for now)'
                    if rfieldname not in rtable.fields:
                        raise SyntaxError,\
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename)
                rtable._referenced_by.append((self._tablename, field.name))

    def _filter_fields(self, record, id=False):
        # keep only keys that are actual fields; drop 'id' unless id=True
        return dict([(k, v) for (k, v) in record.items() if k
                     in self.fields and (self[k].type!='id' or id)])

    def _build_query(self,key):
        """ for keyed table only """
        # AND together one equality per primary-key column given in ``key``
        query = None
        for k,v in key.iteritems():
            if k in self._primarykey:
                if query:
                    query = query & (self[k] == v)
                else:
                    query = (self[k] == v)
            else:
                raise SyntaxError, \
                    'Field %s is not part of the primary key of %s' % \
                    (k,self._tablename)
        return query

    def __getitem__(self, key):
        # table[None] -> None; table[dict] -> keyed-table record;
        # table[n] -> record by id; table['name'] -> Field object
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            rows = self._db(query).select()
            if rows:
                return rows[0]
            return None
        elif str(key).isdigit():
            return self._db(self.id == key).select(limitby=(0,1)).first()
        elif key:
            return dict.__getitem__(self, str(key))

    def __call__(self, key=DEFAULT, **kwargs):
        # table(id), table(query) or table(field=value,...) -> record or None
        if key!=DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(limitby=(0,1)).first()
            elif not str(key).isdigit():
                record = None
            else:
                record = self._db(self.id == key).select(limitby=(0,1)).first()
            if record:
                # extra kwargs act as an equality filter on the fetched record
                for k,v in kwargs.items():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()])
            return self._db(query).select(limitby=(0,1)).first()
        else:
            return None

    def __setitem__(self, key, value):
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            # upsert: try insert, fall back to update on the primary key
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError,\
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey)
        elif str(key).isdigit():
            # table[0] = dict inserts; table[n] = dict updates record n
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif not self._db(self.id == key)\
                    .update(**self._filter_fields(value)):
                raise SyntaxError, 'No such record: %s' % key
        else:
            if isinstance(key, dict):
                raise SyntaxError,\
                    'value must be a dictionary: %s' % value
            dict.__setitem__(self, str(key), value)

    def __delitem__(self, key):
        # del table[id] / del table[pk-dict] deletes the matching record
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError, 'No such record: %s' % key
        elif not str(key).isdigit() or not self._db(self.id == key).delete():
            raise SyntaxError, 'No such record: %s' % key

    def __getattr__(self, key):
        # attribute access falls through to item access (table.fieldname)
        return self[key]

    def __setattr__(self, key, value):
        if key in self:
            raise SyntaxError, 'Object exists and cannot be redefined: %s' % key
        self[key] = value

    def __iter__(self):
        # iterate over Field objects in declaration order
        for fieldname in self.fields:
            yield self[fieldname]

    def __repr__(self):
        return '<Table ' + dict.__repr__(self) + '>'

    def __str__(self):
        # aliased tables render as 'original AS alias'
        if self.get('_ot', None):
            return '%s AS %s' % (self._ot, self._tablename)
        return self._tablename

    def _drop(self, mode = ''):
        # SQL for DROP, without executing it
        return self._db._adapter._drop(self, mode)

    def drop(self, mode = ''):
        return self._db._adapter.drop(self,mode)

    def _listify(self,fields,update=False):
        # turn a {name: value} dict into [(Field, value)] pairs, adding
        # defaults (insert) or update values and computed fields, and
        # enforcing required fields on insert
        new_fields = []
        new_fields_names = []
        for name in fields:
            if not name in self.fields:
                raise SyntaxError, 'Field %s does not belong to the table' % name
            new_fields.append((self[name],fields[name]))
            new_fields_names.append(name)
        for ofield in self:
            if not ofield.name in new_fields_names:
                if not update and ofield.default!=None:
                    new_fields.append((ofield,ofield.default))
                elif update and ofield.update!=None:
                    new_fields.append((ofield,ofield.update))
        for ofield in self:
            if not ofield.name in new_fields_names and ofield.compute:
                try:
                    new_fields.append((ofield,ofield.compute(Row(fields))))
                except KeyError:
                    pass
            if not update and ofield.required and not ofield.name in new_fields_names:
                raise SyntaxError,'Table: missing required field: %s' % ofield.name
        return new_fields

    def _insert(self, **fields):
        # SQL for INSERT, without executing it
        return self._db._adapter._insert(self,self._listify(fields))

    def insert(self, **fields):
        return self._db._adapter.insert(self,self._listify(fields))

    def validate_and_insert(self,**fields):
        # insert only if every validator passes; response carries
        # .id (or None) and .errors
        response = Row()
        response.errors = self._validate(**fields)
        if not response.errors:
            response.id = self.insert(**fields)
        else:
            response.id = None
        return response

    def update_or_insert(self, key=DEFAULT, **values):
        # update the record matching key (or values) if found, else insert;
        # returns the new id, or None when an update happened
        if key==DEFAULT:
            record = self(**values)
        else:
            record = self(key)
        if record:
            record.update_record(**values)
            newid = None
        else:
            newid = self.insert(**values)
        return newid

    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        items = [self._listify(item) for item in items]
        return self._db._adapter.bulk_insert(self,items)

    def _truncate(self, mode = None):
        # SQL for TRUNCATE, without executing it
        return self._db._adapter._truncate(self, mode)

    def truncate(self, mode = None):
        return self._db._adapter.truncate(self, mode)

    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        *args, **kwargs
        ):
        """
        import records from csv file. Column headers must have same names as
        table fields. field 'id' is ignored. If column names read 'table.file'
        the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)

        reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map):
            # decode one csv cell according to the field type, remapping
            # reference ids through id_map when available
            if value == null:
                value = None
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith('list:reference'):
                # NOTE(review): [10:] looks off for 'list:reference <table>'
                # (the prefix is 15 characters) -- verify the slice
                ref_table = field.type[10:].strip()
                value = [id_map[ref_table][int(v)] for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][value]
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            # True when the column maps onto this table's 'id' field
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        for line in reader:
            if not line:
                break
            if not colnames:
                # first non-empty line is the header; strip 'table.' prefixes
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], []
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    else:
                        cols.append(i)
                    if colname == unique:
                        unique_idx = i
            else:
                items = [fix(self[colnames[i]], line[i], id_map) for i in cols]
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                if not unique or unique not in colnames:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid != []:
                    # remember old-id -> new-id so later imports can remap
                    # references to this table
                    id_map_self[line[cid]] = new_id

    def with_alias(self, alias):
        return self._db._adapter.alias(self,alias)

    def on(self, query):
        # used to express JOIN conditions: db.t.on(condition)
        return Expression(self._db,self._db._adapter.ON,self,query)
4609 4610 4611
4612 -class Expression(object):
4613
4614 - def __init__( 4615 self, 4616 db, 4617 op, 4618 first=None, 4619 second=None, 4620 type=None, 4621 ):
4622 4623 self.db = db 4624 self.op = op 4625 self.first = first 4626 self.second = second 4627 ### self._tablename = first._tablename ## CHECK 4628 if not type and first and hasattr(first,'type'): 4629 self.type = first.type 4630 else: 4631 self.type = type
4632
4633 - def sum(self):
4634 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)
4635
4636 - def max(self):
4637 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)
4638
4639 - def min(self):
4640 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)
4641
4642 - def len(self):
4643 return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
4644
4645 - def lower(self):
4646 return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)
4647
4648 - def upper(self):
4649 return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)
4650
4651 - def year(self):
4652 return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')
4653
4654 - def month(self):
4655 return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')
4656
4657 - def day(self):
4658 return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')
4659
4660 - def hour(self):
4661 return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')
4662
4663 - def minutes(self):
4664 return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')
4665
4666 - def coalesce_zero(self):
4667 return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)
4668
4669 - def seconds(self):
4670 return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')
4671
4672 - def __getslice__(self, start, stop):
4673 if start < 0: 4674 pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) 4675 else: 4676 pos0 = start + 1 4677 4678 if stop < 0: 4679 length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) 4680 elif stop == sys.maxint: 4681 length = self.len() 4682 else: 4683 length = '(%s - %s)' % (stop + 1, pos0) 4684 return Expression(self.db,self.db._adapter.SUBSTRING, 4685 self, (pos0, length), self.type)
4686
4687 - def __getitem__(self, i):
4688 return self[i:i + 1]
4689
4690 - def __str__(self):
4691 return self.db._adapter.expand(self,self.type)
4692
4693 - def __or__(self, other): # for use in sortby
4694 return Expression(self.db,self.db._adapter.COMMA,self,other,self.type)
4695
4696 - def __invert__(self):
4697 if hasattr(self,'_op') and self.op == self.db._adapter.INVERT: 4698 return self.first 4699 return Expression(self.db,self.db._adapter.INVERT,self,type=self.type)
4700
4701 - def __add__(self, other):
4702 return Expression(self.db,self.db._adapter.ADD,self,other,self.type)
4703
4704 - def __sub__(self, other):
4705 if self.type == 'integer': 4706 result_type = 'integer' 4707 elif self.type in ['date','time','datetime','double']: 4708 result_type = 'double' 4709 else: 4710 raise SyntaxError, "subtraction operation not supported for type" 4711 return Expression(self.db,self.db._adapter.SUB,self,other, 4712 result_type)
4713 - def __mul__(self, other):
4714 return Expression(self.db,self.db._adapter.MUL,self,other,self.type)
4715
4716 - def __div__(self, other):
4717 return Expression(self.db,self.db._adapter.DIV,self,other,self.type)
4718
4719 - def __mod__(self, other):
4720 return Expression(self.db,self.db._adapter.MOD,self,other,self.type)
4721
4722 - def __eq__(self, value):
4723 return Query(self.db, self.db._adapter.EQ, self, value)
4724
4725 - def __ne__(self, value):
4726 return Query(self.db, self.db._adapter.NE, self, value)
4727
4728 - def __lt__(self, value):
4729 return Query(self.db, self.db._adapter.LT, self, value)
4730
4731 - def __le__(self, value):
4732 return Query(self.db, self.db._adapter.LE, self, value)
4733
4734 - def __gt__(self, value):
4735 return Query(self.db, self.db._adapter.GT, self, value)
4736
4737 - def __ge__(self, value):
4738 return Query(self.db, self.db._adapter.GE, self, value)
4739
4740 - def like(self, value):
4741 return Query(self.db, self.db._adapter.LIKE, self, value)
4742
4743 - def belongs(self, value):
4744 return Query(self.db, self.db._adapter.BELONGS, self, value)
4745
    def startswith(self, value):
        """
        Return a STARTSWITH Query; only valid on string/text fields.
        """
        if not self.type in ('string', 'text'):
            raise SyntaxError, "startswith used with incompatible field type"
        return Query(self.db, self.db._adapter.STARTSWITH, self, value)
4750
    def endswith(self, value):
        """
        Return an ENDSWITH Query; only valid on string/text fields.
        """
        if not self.type in ('string', 'text'):
            raise SyntaxError, "endswith used with incompatible field type"
        return Query(self.db, self.db._adapter.ENDSWITH, self, value)
4755
    def contains(self, value):
        """
        Return a CONTAINS Query; valid on string/text fields and on
        'list:*' fields (membership in the stored list).
        """
        if not self.type in ('string', 'text') and not self.type.startswith('list:'):
            raise SyntaxError, "contains used with incompatible field type"
        return Query(self.db, self.db._adapter.CONTAINS, self, value)
4760
4761 - def with_alias(self,alias):
4762 return Expression(self.db,self.db._adapter.AS,self,alias,self.type)
4763 4764 # for use in both Query and sortby 4765 4766
class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        # encoder/decoder default to the identity; _class defaults to the
        # web2py type name so str(self) stays meaningful
        identity = lambda x: x
        self.type = type
        self.native = native
        self.encoder = encoder if encoder else identity
        self.decoder = decoder if decoder else identity
        self.validator = validator
        self._class = _class if _class else type

    def startswith(self, dummy=None):
        # custom types never support the startswith operator
        return False

    def __getslice__(self, a=0, b=100):
        # slicing a custom type is not supported
        return None

    def __getitem__(self, i):
        # indexing a custom type is not supported
        return None

    def __str__(self):
        return self._class
4821 4822
class Field(Expression):

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True, widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql)
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs

    """

    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        ):
        # Expression attributes: a bare Field has no operator/operands and
        # is bound to a db/table later, by define_table
        self.db = None
        self.op = None
        self.first = None
        self.second = None
        if not isinstance(fieldname,str):
            raise SyntaxError, "missing field name"
        # leading ':' or '.' marks the field as neither readable nor writable
        if fieldname.startswith(':'):
            fieldname,readable,writable=fieldname[1:],False,False
        elif fieldname.startswith('.'):
            fieldname,readable,writable=fieldname[1:],False,False
        # 'name=value' shorthand sets the default inline
        if '=' in fieldname:
            fieldname,default = fieldname.split('=',1)
        self.name = fieldname = cleanup(fieldname)
        # reject names that would collide with Table attributes, private
        # names, or Python keywords
        if hasattr(Table,fieldname) or fieldname[0] == '_' or \
                regex_python_keywords.match(fieldname):
            raise SyntaxError, 'Field: invalid field name: %s' % fieldname
        # passing a Table as type creates a reference field to that table
        if isinstance(type, Table):
            type = 'reference ' + type._tablename
        self.type = type  # 'string', 'integer'
        self.length = (length is None) and MAXCHARLENGTH or length
        # when no default is given, fall back to the update value (or None)
        if default==DEFAULT:
            self.default = update or None
        else:
            self.default = default
        self.required = required  # is this field required
        self.ondelete = ondelete.upper()  # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.widget = widget
        # default label: 'first_name' -> 'First Name'
        self.label = label or ' '.join(item.capitalize() for item in fieldname.split('_'))
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # default representation for list types: comma-joined items
        if not represent and type in ('list:integer','list:string'):
            represent=lambda x: ', '.join(str(y) for y in x or [])
        self.represent = represent
        self.compute = compute
        # NOTE(review): always True regardless of field type — confirm intent
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        # NOTE(review): dead code — self.label was already defaulted above
        # and can no longer be None here
        if self.label is None:
            self.label = ' '.join([x.capitalize() for x in
                                   fieldname.split('_')])
        if requires is None:
            self.requires = []
        else:
            # may still hold the DEFAULT sentinel here; presumably resolved
            # elsewhere (e.g. at table definition) — TODO confirm
            self.requires = requires

    def store(self, file, filename=None, path=None):
        """
        Store an uploaded file and return the generated safe filename
        (<table>.<field>.<uuid16>.<b16-encoded original name>.<ext>).
        Delegates to custom_store when one was supplied; writes into a blob
        field when uploadfield is a Field, else onto the filesystem.
        """
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if not filename:
            filename = file.name
        # normalize path separators and keep only the basename
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        # original name is b16-encoded so it can be recovered by retrieve()
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        newfilename = newfilename[:200] + '.' + extension
        if isinstance(self.uploadfield,Field):
            # file content goes into another (blob) field of another table
            blob_uploadfield_name = self.uploadfield.uploadfield
            keys={self.uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self.uploadfield.table.insert(**keys)
        elif self.uploadfield == True:
            # file content goes onto the filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError, "you must specify a Field(...,uploadfolder=...)"
            if self.uploadseparate:
                # shard uploads into <table>.<field>/<uuid_key[:2]> subdirs
                path = os.path.join(path,"%s.%s" % (self._tablename, self.name),uuid_key[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            pathfilename = os.path.join(path, newfilename)
            dest_file = open(pathfilename, 'wb')
            shutil.copyfileobj(file, dest_file)
            dest_file.close()
        return newfilename

    def retrieve(self, name, path=None):
        """
        Return (original_filename, open file-like object) for a stored
        upload name. Enforces self.authorize (HTTP 403) and existence
        (HTTP 404) when the content lives in the database.
        """
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self.uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        try:
            # recover the original filename encoded by store()
            m = regex_content.match(name)
            if not m or not self.isattachment:
                raise TypeError, 'Can\'t retrieve %s' % name
            filename = base64.b16decode(m.group('name'), True)
            filename = regex_cleanup_fn.sub('_', filename)
        except (TypeError, AttributeError):
            filename = name
        if isinstance(self.uploadfield, str):  # ## if file is in DB
            return (filename, cStringIO.StringIO(row[self.uploadfield] or ''))
        elif isinstance(self.uploadfield,Field):
            # content stored in a blob field of another table
            blob_uploadfield_name = self.uploadfield.uploadfield
            query = self.uploadfield == name
            data = self.uploadfield.table(query)[blob_uploadfield_name]
            return (filename, cStringIO.StringIO(data))
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                # mirror the sharded layout used by store()
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = os.path.join(path,"%s.%s" % (t,f),u[:2])
            return (filename, open(os.path.join(path, name), 'rb'))

    def formatter(self, value):
        """
        Run the validators' formatters (in reverse order) over value,
        e.g. to turn a Python value into its form representation.
        """
        if value is None or not self.requires:
            return value
        if not isinstance(self.requires, (list, tuple)):
            requires = [self.requires]
        elif isinstance(self.requires, tuple):
            requires = list(self.requires)
        else:
            requires = copy.copy(self.requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def validate(self, value):
        """
        Run the field validators over value; return (value, None) on
        success or (value, error) from the first failing validator.
        """
        if not self.requires:
            return (value, None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return (value, None)

    def count(self):
        """Return a COUNT aggregate expression over this field."""
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')

    def __nonzero__(self):
        # a Field is always truthy (overrides Expression comparisons)
        return True

    def __str__(self):
        # 'tablename.fieldname', or a placeholder before binding to a table
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            return '<no table>.%s' % self.name
5055 5056
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ):
        """Store the adapter operation and its (up to two) operands."""
        self.db = db
        self.op = op
        self.first = first
        self.second = second

    def __str__(self):
        # SQL generation is delegated entirely to the adapter
        return self.db._adapter.expand(self)

    def __and__(self, other):
        """Combine two queries with a logical AND."""
        adapter = self.db._adapter
        return Query(self.db, adapter.AND, self, other)

    def __or__(self, other):
        """Combine two queries with a logical OR."""
        adapter = self.db._adapter
        return Query(self.db, adapter.OR, self, other)

    def __invert__(self):
        """Negate the query; a double negation unwraps to the original."""
        adapter = self.db._adapter
        if self.op == adapter.NOT:
            return self.first
        return Query(self.db, adapter.NOT, self)
5096 5097 5098 regex_quotes = re.compile("'[^']*'") 5099 5100
def xorify(orderby):
    """
    Fold a sequence of orderby expressions into one expression chained
    with the | operator; return None for an empty or missing input.
    """
    if not orderby:
        return None
    items = iter(orderby)
    combined = next(items)
    for expr in items:
        combined = combined | expr
    return combined
5108 5109
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
       set = db(db.users.name=='Max')
    you can:
       set.update(db.users.name='Massimo')
       set.delete() # all elements in the set
       set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
       subset = set(db.users.id<5)
    """

    def __init__(self, db, query):
        self.db = db
        self._db = db  # for backward compatibility
        self.query = query

    def __call__(self, query):
        """
        Return a subset: AND the given condition onto this set's query.
        A Table means "all rows of that table"; a Field means
        "field is not NULL".
        """
        if isinstance(query,Table):
            query = query._id>0
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query)
        else:
            return Set(self.db, query)

    def _count(self,distinct=None):
        # return the SQL for count() without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # return the SQL for select() without executing it
        return self.db._adapter._select(self.query,fields,attributes)

    def _delete(self):
        # return the SQL for delete() without executing it
        tablename=self.db._adapter.get_table(self.query)
        return self.db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # return the SQL for update() without executing it
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        return self.db._adapter._update(tablename,self.query,fields)

    def isempty(self):
        """True when the set matches no record (probes with limitby)."""
        return not self.select(limitby=(0,1))

    def count(self,distinct=None):
        """Count the records in the set."""
        return self.db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Execute the select and return a Rows object."""
        return self.db._adapter.select(self.query,fields,attributes)

    def delete(self):
        """Delete the records in the set (and their autodelete uploads)."""
        tablename=self.db._adapter.get_table(self.query)
        self.delete_uploaded_files()
        return self.db._adapter.delete(tablename,self.query)

    def update(self, **update_fields):
        """Update the records in the set with the given field values."""
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError, "No fields to update"
        # remove files that are about to be replaced, for autodelete fields
        self.delete_uploaded_files(update_fields)
        return self.db._adapter.update(tablename,self.query,fields)

    def delete_uploaded_files(self, upload_fields=None):
        """
        Remove from disk the files referenced by autodelete upload fields
        of the records in this set; when upload_fields is given, keep
        files whose name is unchanged by the pending update.
        """
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # the update keeps the same file: do not delete it
                    continue
                uploadfolder = field.uploadfolder
                if not uploadfolder:
                    uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads')
                if field.uploadseparate:
                    # sharded layout: <folder>/<table>.<field>/<uuid[:2]>
                    items = oldname.split('.')
                    uploadfolder = os.path.join(uploadfolder,
                                                "%s.%s" % (items[0], items[1]),
                                                items[2][:2])
                oldpath = os.path.join(uploadfolder, oldname)
                if os.path.exists(oldpath):
                    os.unlink(oldpath)
5210 5211
def update_record(pack, a=None):
    """
    Persist changed values of a row back to the database and mirror them
    into the row's column set.

    :param pack: a (colset, table, id) triple identifying the record
    :param a: optional dict of values to write; when empty/None the
        current column set is written back

    Fix: the default was the mutable ``a={}`` anti-pattern; ``None`` is
    behavior-identical here since the body only evaluates ``a or ...``.
    """
    (colset, table, id) = pack
    b = a or dict(colset)
    # keep only real table columns, and never try to rewrite the id
    c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id'])
    table._db(table._id==id).update(**c)
    # reflect the persisted values back into the in-memory column set
    for (k, v) in c.items():
        colset[k] = v
5219 5220
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=[],
        colnames=[],
        compact=True,
        rawrows=None
        ):
        self.db = db
        self.records = records
        self.colnames = colnames
        self.compact = compact
        self.response = rawrows

    def setvirtualfields(self,**keyed_virtualfields):
        """
        Attach computed (virtual) fields to every row: for each
        tablename=virtualfields_object pair, public zero-argument methods
        of the object are evaluated against the row and stored under
        row[tablename].
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.items():
                attributes = dir(virtualfields)
                # expose the row's values as attributes of the
                # virtualfields object so its methods can use them
                virtualfields.__dict__.update(row)
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                for attribute in attributes:
                    if attribute[0] != '_':
                        method = getattr(virtualfields,attribute)
                        # only call bound methods that take no extra args
                        # (Python 2 im_func/func_code introspection)
                        if hasattr(method,'im_func') and method.im_func.func_code.co_argcount:
                            box[attribute]=method()
        return self

    def __and__(self,other):
        """Concatenate two compatible Rows objects (same colnames)."""
        if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects'
        records = self.records+other.records
        return Rows(self.db,records,self.colnames)

    def __or__(self,other):
        """Union of two compatible Rows objects (duplicates removed)."""
        if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects'
        # NOTE(review): `records` aliases self.records, so the += below
        # extends this Rows object in place as a side effect — confirm intent
        records = self.records
        records += [record for record in other.records \
                        if not record in records]
        return Rows(self.db,records,self.colnames)

    def __nonzero__(self):
        # truthiness: non-empty result set
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        # slicing returns a new Rows over the sliced records
        return Rows(self.db,self.records[a:b],self.colnames)

    def __getitem__(self, i):
        """
        Return row i; in compact mode a single-table row is unwrapped to
        the inner table Row (unless the only key is '_extra').
        """
        row = self.records[i]
        keys = row.keys()
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """

        s = cStringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        """Return the first row or None when empty."""
        if not self.records:
            return None
        return self[0]

    def last(self):
        """Return the last row or None when empty."""
        if not self.records:
            return None
        return self[-1]

    def find(self,f):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        records = []
        for i in range(0,len(self)):
            row = self[i]
            if f(row):
                records.append(self.records[i])
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i=0
        while i<len(self):
            row = self[i]
            if f(row):
                # mutates self.records in place (documented behavior)
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """
        # temporarily switch compact mode while iterating
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str) for item in self]
        else:
            items = [item for item in self]
        self.compact = compact
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=True):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default True)
        """
        rows = self.as_list(compact, storage_to_dict, datetime_to_str)
        # key may be 'table.field', a plain field name, or a callable
        if isinstance(key,str) and key.count('.')==1:
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            return dict([(r[key],r) for r in rows])
        else:
            return dict([(key(r),r) for r in rows])

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
                         This will only work when exporting rows objects!!!!
                         DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        # a proper csv starting with the column names
        writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                return int(value)
            elif hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not table_field.match(col):
                    # computed/extra column, not a 'table.field' name
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def json(self, mode='object', default=None):
        """
        serializes the table to a JSON list of objects
        """
        mode = mode.lower()
        if not mode in ['object', 'array']:
            raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode

        def inner_loop(record, col):
            # resolve a column value; 'object' mode yields (field, value)
            # pairs, 'array' mode yields bare values
            (t, f) = col.split('.')
            res = None
            if not table_field.match(col):
                res = record._extra[col]
            else:
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (f, res)
            else:
                return res

        if mode == 'object':
            items = [dict([inner_loop(record, col) for col in
                     self.colnames]) for record in self]
        else:
            items = [[inner_loop(record, col) for col in self.colnames]
                     for record in self]
        if have_serializers:
            return serializers.json(items,default=default or serializers.custom_json)
        else:
            # fall back to simplejson when gluon serializers are absent
            import simplejson
            return simplejson.dumps(items)
def Rows_unpickler(data):
    """Recreate a pickled Rows payload (a plain list of dicts)."""
    return cPickle.loads(data)
5501
def Rows_pickler(data):
    """
    Reduce a Rows object for pickling: (unpickler, (pickled list-of-dicts,)).
    The Rows is flattened via as_list so no db/adapter state is pickled.
    """
    return Rows_unpickler, \
           (cPickle.dumps(data.as_list(storage_to_dict=True,
                                       datetime_to_str=False)),)
5506 5507 copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler) 5508 5509 5510 ################################################################################ 5511 # dummy function used to define some doctests 5512 ################################################################################ 5513
def test_all():
    # Doctest suite exercising the DAL end to end (define/insert/select/
    # update/delete, joins, belongs, nested selects, expressions, csv
    # export); it only runs via doctest.testmod() in the __main__ guard.
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
               Field('name'),\
               Field('birth','date'),\
               Field('owner',db.person),\
               migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
                            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
                            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber.id>0).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
5703 ################################################################################ 5704 # deprecated since the new DAL; here only for backward compatibility 5705 ################################################################################ 5706 5707 SQLField = Field 5708 SQLTable = Table 5709 SQLXorable = Expression 5710 SQLQuery = Query 5711 SQLSet = Set 5712 SQLRows = Rows 5713 SQLStorage = Row 5714 SQLDB = DAL 5715 GQLDB = DAL 5716 DAL.Field = Field # was necessary in gluon/globals.py session.connect 5717 DAL.Table = Table # was necessary in gluon/globals.py session.connect 5718 5719 ################################################################################ 5720 # run tests 5721 ################################################################################ 5722 5723 if __name__ == '__main__': 5724 import doctest 5725 doctest.testmod() 5726