[Checkins] SVN: Products.Ape/trunk/ imported a CVS export of Ape
Thomas Lotze
tl at gocept.com
Wed Sep 12 09:28:44 EDT 2007
Log message for revision 79589:
imported a CVS export of Ape
Changed:
A Products.Ape/trunk/
A Products.Ape/trunk/CHANGES.txt
A Products.Ape/trunk/README.txt
A Products.Ape/trunk/SQL.txt
A Products.Ape/trunk/__init__.py
A Products.Ape/trunk/component.xml
A Products.Ape/trunk/datatypes.py
A Products.Ape/trunk/doc/
A Products.Ape/trunk/doc/apexml.txt
A Products.Ape/trunk/doc/outline.txt
A Products.Ape/trunk/doc/tutorial_slides.sxi
A Products.Ape/trunk/lib/
A Products.Ape/trunk/lib/apelib/
A Products.Ape/trunk/lib/apelib/__init__.py
A Products.Ape/trunk/lib/apelib/config/
A Products.Ape/trunk/lib/apelib/config/__init__.py
A Products.Ape/trunk/lib/apelib/config/apeconf.py
A Products.Ape/trunk/lib/apelib/config/common.py
A Products.Ape/trunk/lib/apelib/config/interfaces.py
A Products.Ape/trunk/lib/apelib/core/
A Products.Ape/trunk/lib/apelib/core/__init__.py
A Products.Ape/trunk/lib/apelib/core/classifiers.py
A Products.Ape/trunk/lib/apelib/core/events.py
A Products.Ape/trunk/lib/apelib/core/gateways.py
A Products.Ape/trunk/lib/apelib/core/interfaces.py
A Products.Ape/trunk/lib/apelib/core/io.py
A Products.Ape/trunk/lib/apelib/core/mapper.py
A Products.Ape/trunk/lib/apelib/core/oidgen.py
A Products.Ape/trunk/lib/apelib/core/schemas.py
A Products.Ape/trunk/lib/apelib/core/serializers.py
A Products.Ape/trunk/lib/apelib/fs/
A Products.Ape/trunk/lib/apelib/fs/__init__.py
A Products.Ape/trunk/lib/apelib/fs/annotated.py
A Products.Ape/trunk/lib/apelib/fs/base.py
A Products.Ape/trunk/lib/apelib/fs/classification.py
A Products.Ape/trunk/lib/apelib/fs/connection.py
A Products.Ape/trunk/lib/apelib/fs/fileops.py
A Products.Ape/trunk/lib/apelib/fs/interfaces.py
A Products.Ape/trunk/lib/apelib/fs/oidtable.py
A Products.Ape/trunk/lib/apelib/fs/params.py
A Products.Ape/trunk/lib/apelib/fs/properties.py
A Products.Ape/trunk/lib/apelib/fs/security.py
A Products.Ape/trunk/lib/apelib/fs/structure.py
A Products.Ape/trunk/lib/apelib/sql/
A Products.Ape/trunk/lib/apelib/sql/__init__.py
A Products.Ape/trunk/lib/apelib/sql/classification.py
A Products.Ape/trunk/lib/apelib/sql/dbapi.py
A Products.Ape/trunk/lib/apelib/sql/ingres.py
A Products.Ape/trunk/lib/apelib/sql/interfaces.py
A Products.Ape/trunk/lib/apelib/sql/mysql.py
A Products.Ape/trunk/lib/apelib/sql/oidgen.py
A Products.Ape/trunk/lib/apelib/sql/postgresql.py
A Products.Ape/trunk/lib/apelib/sql/properties.py
A Products.Ape/trunk/lib/apelib/sql/security.py
A Products.Ape/trunk/lib/apelib/sql/sqlbase.py
A Products.Ape/trunk/lib/apelib/sql/structure.py
A Products.Ape/trunk/lib/apelib/sql/table.py
A Products.Ape/trunk/lib/apelib/tests/
A Products.Ape/trunk/lib/apelib/tests/__init__.py
A Products.Ape/trunk/lib/apelib/tests/correct.png
A Products.Ape/trunk/lib/apelib/tests/serialtestbase.py
A Products.Ape/trunk/lib/apelib/tests/testall.py
A Products.Ape/trunk/lib/apelib/tests/testimpl.py
A Products.Ape/trunk/lib/apelib/tests/testio.py
A Products.Ape/trunk/lib/apelib/tests/testparams.py
A Products.Ape/trunk/lib/apelib/tests/testscanner.py
A Products.Ape/trunk/lib/apelib/tests/testserialization.py
A Products.Ape/trunk/lib/apelib/tests/testsqlimpl.py
A Products.Ape/trunk/lib/apelib/tests/teststorage.py
A Products.Ape/trunk/lib/apelib/tests/testzodbtables.py
A Products.Ape/trunk/lib/apelib/tests/testzope2fs.py
A Products.Ape/trunk/lib/apelib/tests/testzope2sql.py
A Products.Ape/trunk/lib/apelib/tests/zope2testbase.py
A Products.Ape/trunk/lib/apelib/zodb3/
A Products.Ape/trunk/lib/apelib/zodb3/__init__.py
A Products.Ape/trunk/lib/apelib/zodb3/connection.py
A Products.Ape/trunk/lib/apelib/zodb3/consts.py
A Products.Ape/trunk/lib/apelib/zodb3/db.py
A Products.Ape/trunk/lib/apelib/zodb3/interfaces.py
A Products.Ape/trunk/lib/apelib/zodb3/notes.txt
A Products.Ape/trunk/lib/apelib/zodb3/resource.py
A Products.Ape/trunk/lib/apelib/zodb3/scanner.py
A Products.Ape/trunk/lib/apelib/zodb3/serializers.py
A Products.Ape/trunk/lib/apelib/zodb3/storage.py
A Products.Ape/trunk/lib/apelib/zodb3/utils.py
A Products.Ape/trunk/lib/apelib/zodb3/zodbtables.py
A Products.Ape/trunk/lib/apelib/zope2/
A Products.Ape/trunk/lib/apelib/zope2/__init__.py
A Products.Ape/trunk/lib/apelib/zope2/apeconf.xml
A Products.Ape/trunk/lib/apelib/zope2/classifier.py
A Products.Ape/trunk/lib/apelib/zope2/mapper.py
A Products.Ape/trunk/lib/apelib/zope2/ofsserial.py
A Products.Ape/trunk/lib/apelib/zope2/products.py
A Products.Ape/trunk/lib/apelib/zope2/scripts.py
A Products.Ape/trunk/lib/apelib/zope2/security.py
A Products.Ape/trunk/lib/apelib/zope2/setup/
A Products.Ape/trunk/lib/apelib/zope2/setup/__init__.py
A Products.Ape/trunk/lib/apelib/zope2/setup/patches.py
A Products.Ape/trunk/version.txt
-=-
Added: Products.Ape/trunk/CHANGES.txt
===================================================================
--- Products.Ape/trunk/CHANGES.txt (rev 0)
+++ Products.Ape/trunk/CHANGES.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,284 @@
+
+Version 1.1
+
+ - Chris Kratz tracked down and solved a problem with the persistence
+ of proxy roles.
+
+ - Ape now tries to commit or abort relational database transactions
+ when a ZODB connection reads but doesn't write objects. It does
+ this by starting a ZODB transaction even when reading.
+
+ - __ac_roles__ attributes are now deserialized as a tuple.
+
+ - Page templates were being written to disk more often than
+ necessary. The _bind_names attribute is now handled by a new
+ serializer and page templates are saved only when they change.
+
+ - Fixed compatibility with recent versions of MySQLdb that use the
+ array.array type for binary database columns.
+
+ - Ape was not serializing the standard roles on the root folder,
+ only user-defined roles. Fixed with help from Tim Connor.
+
+Version 1.0
+
+ - Ape now supports mapping subclasses. Until now, mappers were
+ registered for classes but not for all subclasses. Now, a mapper
+ applies to subclasses as well, unless the configuration file
+ specifies 'exact-class'.
+
+ - Revised the configuration file format for simplification and to
+ allow mapping subclasses. The new 'load' and 'store' directives
+ make the configuration file easier to understand.
+
+ - There is a new way to convert between database-specific types and
+ generic types in apelib.sql. See the RDBMSColumn class and the
+ 'column_factories_by_name' attribute of AbstractSQLConnection. OIDs
+ take advantage of this feature; OIDs are now sent to the database as
+ integers.
+
+ - The SQL package now uses column and row sequence schemas for table
+ definitions.
+
+ - Pre-compiled queries should now be easy to implement. (Here is
+ the plan: table.py should wrap the sql in a PrecompiledQuery,
+ which execute() should enhance once the query is compiled, then
+ table.py should cache the query.)
+
+ - Added an Ingres database driver contributed by Computer Associates.
+ See http://www3.ca.com/Solutions/Collateral.asp?CID=59656
+
+ - Removed support for DBTab, the method Zope 2.6 uses for mounting
+ databases. The method Zope 2.7 uses is much easier to maintain.
+
+
+Version 0.8.1
+
+ - Ape now works harder at consistently generating the same
+ representation of a pickle. This is important for version control.
+
+ - Some databases require an explicit "not null" constraint for primary
+ keys. Ape now provides it.
+
+ - Fixed a NameError in the Zope 2 classifier.
+
+
+Version 0.8
+
+ - APE used to stand for Adaptable PErsistence. Now it stands for
+ Adaptable Persistence Engine. Someone finally found a good word
+ that starts with E, but I'm not sure who deserves the credit.
+
+ - Major restructuring to reduce the number of concepts in Ape.
+
+ Ape supported a concept called domain mappers. Domain mappers
+ allowed you to redefine object mapping policies in the context of
+ a particular mapped object. This feature was a theoretical
+ advantage, but it necessitated a large number of extra concepts in
+ Ape: keychains, keys, mapper trees, multiple classifiers, and so
+ forth. These extra concepts were a major stumbling block for
+ people who wanted to learn about Ape, and the benefit of domain
+ mappers was uncertain.
+
+ All of those concepts have been removed. Keychains and keys have
+ been replaced with simple string OIDs. There is now a flat
+ namespace of mappers instead of a tree. Only one classifier and
+ one OID generator are used in any object database.
+
+ This necessitated changes to all of the interfaces and most of the
+ code. Hopefully, though, all of the changes will make Ape easier
+ to learn and use. The interfaces now use shorter method names and
+ attributes instead of methods where possible, making many
+ operations less opaque.
+
+ Ape also no longer uses fixed OIDs very often, since they were
+ also a major source of confusion. Standard ZODB always allocates
+ a new OID for new objects, but Ape makes it possible to reuse
+ OIDs. You should not use this capability unless you understand
+ the guts of a persistent object system; otherwise you'll generate
+ ConflictErrors that will make you tear out your hair. Therefore,
+ Ape no longer shows you how to use fixed OIDs. Developers will
+ have to figure it out on their own.
+
+ - The ZODB root object is now stored on the filesystem. It used to
+ be a virtual object that only contained the Zope Application object.
+ The true root object is now stored in a subdirectory of the
+ application root called "_root".
+
+ - Removed the concepts of "classified state" and "hints", now
+ relying on the existing concepts of classification and state. This
+ primarily involved changes to classifiers and the apelib.core.io
+ module.
+
+ - Implemented the folder item classification optimization. Now,
+ when Ape loads a folder, it passes along the classification of each
+ subitem as part of the containing folder's state. This means fewer
+ round-trips.
+
+ - Pickled remainders are now encoded in base64 and stored in the
+ properties file. Ape no longer creates .remainder files, although
+ it will read them. This reduces the number of files Ape creates.
+
+ - Ape can now successfully act as the main Zope database.
+ Woohoo! Requires Zope 2.7.
+
+ - Ape now uses arbitrary OIDs on the filesystem rather than using
+ paths as OIDs. This solved problems with moving and replacing
+ objects. It also removed the need for a monkey patch and several
+ hacks. Overall, connections with the filesystem are now much more
+ robust, but it is also necessary to maintain an in-memory index of
+ OIDs to paths.
+
+ - Restructured SQL database connections to make it easier to cope
+ with the differences between databases. There is no longer a
+ query generator object. Instead, the connection generates and
+ executes queries on the fly.
+
+ - SQLMultiTableProperties, previously experimental, is now the
+ default. Now, in relational databases, class-defined properties
+    are assigned their own columns in automatically generated
+ class-specific tables. Creates columns for strings, integers, and
+ booleans, but more complicated types still get stored in the
+ variable property table.
+
+
+Version 0.7.1
+
+ - Fixed several bugs that made Ape fail under Python 2.1 and Zope 2.6.
+ Thanks to Seb Bacon for discovering the bugs.
+
+
+Version 0.7
+
+ - Ape now uses XML to configure mappers. See doc/apexml.txt. Zope
+ Products can expand the mapper configuration just by supplying an
+ apeconf.xml file.
+
+ - Ape now periodically scans the filesystem for changes to loaded
+ objects. This means you can make changes on the filesystem and see
+ the changes after the next scan (the default is to scan every 10
+ seconds). Previously, it was necessary to set the ZODB cache size
+ to 0 to do this.
+
+ - Experimental code persists Zope properties using one table per
+ class. At this point it's just a proof of concept, since there are
+ a few technical issues with this.
+
+ - Experimental code lets you configure Ape mount points using the
+ new zope.conf file in Zope 2.7. See component.xml.
+
+ - There is now a set of facades in apelib.core.io that should make
+ it easier to use Ape mappers outside ZODB. (The GatewayIO and
+ ObjectSystemIO classes are stable, but the ExportImport class is
+ not.)
+
+ - In SQL, the classification table schema changed, so Ape started
+ using a table by a different name. (The meta_type is now irrelevant
+ and the mapper_name is now authoritative.) Unless you move the old
+ data to the new table, any data you've already stored in an RDBMS
+ will no longer be visible to Ape. At some point we need to come up
+ with a standard way to migrate tables to match new schemas.
+
+ - The standard classifier is now less specific to Zope 2, bringing
+ Ape closer to mapping arbitrary Python objects.
+
+ - Started code for importing and exporting using mappers, including
+ the ability to export/import an archive.
+
+
+Version 0.6
+
+ - Renamed to "Ape", short for Adaptable PErsistence. Put most of
+ the code in apelib, a new top-level package usable outside Zope and
+ ZODB. The top-level package is located inside the Ape product, but
+ the Ape product imports the package without requiring PYTHONPATH
+ adjustments.
+
+ - Changed naming conventions to fit Zope 3. Modules now have
+ lowercase names and usually define more than one class. The new
+ layout is more compact. As a side benefit, this led to a 30%
+ reduction in lines of code, falling to 7200. If Guido is right and
+ one person can only deal with 10,000 lines of code at a time, we're
+ safe again.
+
+ - Added MySQL support, tested with MySQL-Max 4.0.11. A recent
+ version of the Python MySQLdb wrapper and transaction support in
+ MySQL are required.
+
+ - Renamed some classes and interfaces:
+
+ ObjectSerializer to CompositeSerializer
+ ObjectGateway to CompositeGateway
+ IObjectSerializer to IFullObjectSerializer
+ ObjectMapper to Mapper
+ IObjectMapper to IMapper
+ IAspectSerializer to ISerializer
+
+ - Enhanced filename extension processing.
+
+ - Fixed loading of ZClass instances.
+
+ - Simplified the configuration of object mappers using "base"
+ mappers. This should make it easier to write new mappers.
+
+ - Security attributes now get serialized in a natural way. The
+ security attributes include the executable owner, local roles, role
+ definitions, proxy roles, and permission mapping.
+
+ - Used text mode when reading/writing text files, which should
+ work better on Windows.
+
+ - Moved remainder pickles to their own file, ".remainder".
+
+ - Allowed object names to start with a dot (unless they would
+ conflict with .properties or .remainder files.)
+
+ - Made _p_mtime reflect the actual last modified time of a file or
+ database record. To do this with ZODB 3, it was necessary to stop
+ using _p_serial to store hashes. Hopefully ZODB 4 already keeps
+ _p_mtime and _p_serial distinct.
+
+ - Fixed assorted bugs.
+
+
+Version 0.5
+
+ - Certain kinds of Zope objects stored on the filesystem now get an
+ automatic filename extension. So, for example, if you create a Zope
+ Page Template called "standard_template", on the filesystem it will
+ show up as "standard_template.html", making it easy to edit.
+
+ - Integrated work by Christian Zagrodnick: added mappers for Python
+ Scripts, DTML Methods, DTML Documents, and ZSQLMethods. Thanks!
+
+ - Added some caching in FSConnection to improve performance.
+
+ - Fixed storage of selection properties. The select_variable was
+ being forgotten.
+
+ - Fixed moving of objects between databases (using cut and paste).
+
+
+Version 0.4.2
+
+ - Corrected serialization of large OFS.File objects. OFS.File uses
+ some ZODB trickery to store large files, but AdaptableStorage needs
+ to be aware of that trickery.
+
+ - Made the remainder pickler properly restore cyclic references to
+ the persistent object. Until now, it accidentally made internal
+ copies of the object.
+
+ - Although it involved no changes to AdaptableStorage, fixed some
+    bugs in ZCatalog that prevented catalogs from being stored by
+ AdaptableStorage.
+
+ - Fixed object copying. The object copier assumed it could ghostify
+ objects immediately, but it turns out that the pickling machinery
+ sometimes needs to revisit objects.
+
+
+Version 0.4.1
+
+ First public release.
+
Property changes on: Products.Ape/trunk/CHANGES.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/README.txt
===================================================================
--- Products.Ape/trunk/README.txt (rev 0)
+++ Products.Ape/trunk/README.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,47 @@
+
+Quick Start
+===========
+
+Assuming you already have Zope set up and working, follow these steps
+to get started with Ape.
+
+1. Check your Zope version. These instructions require Zope 2.7.
+
+2. Add the Ape product to Zope by placing it in the Products
+directory. If you're using an INSTANCE_HOME setup, place it with your
+other add-on products.
+
+3. Open component.xml, provided by the Ape product, in a text editor.
+Copy one of the sample configurations to your zope.conf, changing it
+to fit your system.
+
+4. Start Zope.
+
+5. If you mounted the database somewhere other than the root, visit
+the Zope management interface. Select "ZODB Mount Point" from the
+"add" drop-down. Click the "Create selected mount points" button.
+
+6. Visit the portion of the object database stored using Ape and add
+things to it. As you add objects, they will appear on the filesystem
+or in your database.
+
+
+Tutorial
+========
+
+A tutorial on the Ape library was prepared and delivered at PyCon
+2003. The text of the tutorial, called 'outline.txt', is in the 'doc'
+subdirectory. The accompanying slides, in OpenOffice Impress format,
+are somewhat out of date, but can be downloaded at the following URL:
+
+http://cvs.zope.org/Products/Ape/doc/tutorial_slides.sxi?rev=HEAD&content-type=application/octet-stream
+
+
+Adding New Object Types
+=======================
+
+After reading the tutorial, see doc/apexml.txt for instructions on how
+to make Ape aware of other object types. Although Ape can store any
+kind of ZODB object, Ape stores a Python pickle when no specific
+mapper is provided for a class. Use apeconf.xml files to configure
+new mappers.
Property changes on: Products.Ape/trunk/README.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/SQL.txt
===================================================================
--- Products.Ape/trunk/SQL.txt (rev 0)
+++ Products.Ape/trunk/SQL.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,22 @@
+
+These are notes on how to set up various databases for testing
+purposes. For a real database, you should set up access controls.
+
+Postgres
+========
+
+rm -rf ~/pgsql
+initdb ~/pgsql
+postmaster -D ~/pgsql
+createdb
+
+
+MySQL
+=====
+
+rm -rf /var/lib/mysql
+mysql_install_db
+/etc/init.d/mysql start
+mysql
+create database ape;
+grant all privileges on ape.* to 'shane'@'localhost';
Property changes on: Products.Ape/trunk/SQL.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/__init__.py
===================================================================
--- Products.Ape/trunk/__init__.py (rev 0)
+++ Products.Ape/trunk/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,33 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Ape -- Adaptable Persistence Engine.
+
+$Id$
+"""
+
+import os
+import sys
+
+# Import the copy of apelib from 'lib' by temporarily changing sys.path.
+old_path = sys.path[:]
+here = __path__[0]
+sys.path.insert(0, os.path.join(here, 'lib'))
+try:
+ import apelib
+finally:
+ # Restore sys.path
+ sys.path[:] = old_path
+
+from apelib.zope2.setup import patches
+patches.apply_patches()
Property changes on: Products.Ape/trunk/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/component.xml
===================================================================
--- Products.Ape/trunk/component.xml (rev 0)
+++ Products.Ape/trunk/component.xml 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+
+<!-- Ape storage and database ZConfig section setup
+
+To connect to the filesystem, add the following to zope.conf:
+
+
+%import Products.Ape
+<ape-db fs>
+ <ape-storage>
+ mapper-variation filesystem
+ <ape-fs-connection fs>
+ basepath $INSTANCE/var/myfs
+ </ape-fs-connection>
+ </ape-storage>
+ mount-point /fs
+ scan-interval 10
+</ape-db>
+
+
+For a PostgreSQL connection (requires the psycopg module):
+
+
+%import Products.Ape
+<ape-db sql>
+ <ape-storage>
+ mapper-variation sql
+ <ape-dbapi-connection db>
+ connection-class apelib.sql.postgresql.PostgreSQLConnection
+ module-name psycopg
+ connect-expression connect('')
+ </ape-dbapi-connection>
+ </ape-storage>
+ mount-point /sql
+</ape-db>
+
+
+For a MySQL connection (requires the MySQLdb module):
+
+
+%import Products.Ape
+<ape-db sql>
+ <ape-storage>
+ mapper-variation sql
+ <ape-dbapi-connection db>
+ connection-class apelib.sql.mysql.MySQLConnection
+ module-name MySQLdb
+ connect-expression connect(db='ape', user='me', passwd='pw', host='localhost')
+ </ape-dbapi-connection>
+ </ape-storage>
+ mount-point /sql
+</ape-db>
+
+-->
+
+
+<component prefix="Products.Ape.datatypes">
+
+ <abstracttype name="apelib.Connection" />
+
+ <sectiontype name="ape-storage" implements="ZODB.storage"
+ datatype=".Storage">
+ <key name="mapper-variation" required="yes">
+ <description>
+ The name of the mapper variation to load from apeconf.xml
+ files. See Products/Ape/doc/apexml.txt.
+ </description>
+ </key>
+ <key name="debug-conflicts" required="no"
+ datatype="boolean" default="False">
+ <description>
+ If enabled, storage conflicts will be handled as runtime errors rather
+ than ZODB conflicts, making it easier to debug mappers.
+ </description>
+ </key>
+ <multisection type="apelib.Connection" name="+" attribute="connections">
+ <description>
+ Defines a database connector for use with this storage.
+ </description>
+ </multisection>
+ </sectiontype>
+
+
+ <sectiontype name="ape-fs-connection" implements="apelib.Connection"
+ datatype=".FSConnection">
+ <key name="basepath" required="yes">
+ <description>
+ The base filesystem path for the storage.
+ </description>
+ </key>
+ <key name="annotation-prefix" default=".">
+ <description>
+ The prefix to use for annotation files.
+ </description>
+ </key>
+ <key name="hidden-filenames" default="_">
+ <description>
+ A regular expression that determines which filenames should be
+ hidden from the object system. The default is '_', meaning
+ that filenames starting with an underscore do not appear in
+ the object system. This filter is in addition to the annotation
+ filename filtering.
+ </description>
+ </key>
+ </sectiontype>
+
+
+ <sectiontype name="ape-dbapi-connection" implements="apelib.Connection"
+ datatype=".DBAPIConnection">
+ <key name="connection-class" required="yes"
+ datatype="Zope.Startup.datatypes.importable_name">
+ <description>
+ A class that implements apelib.sql.interfaces.IRDBMSConnection.
+ </description>
+ </key>
+ <key name="module-name" required="yes">
+ <description>
+ The name of the DB-API module to use. (See PEP 249.)
+ </description>
+ </key>
+ <key name="connect-expression" required="yes">
+ <description>
+ The Python expression to use for connecting to the database.
+ The name 'connect' is in the namespace and is bound to the
+ 'connect' function from the DB-API module.
+ </description>
+ </key>
+ <key name="prefix" default="">
+ <description>
+ A prefix to use for all tables in the database.
+ </description>
+ </key>
+ </sectiontype>
+
+
+ <sectiontype name="ape-db" implements="ZODB.database"
+ datatype=".Database">
+ <key name="mapper-variation" required="no">
+ <description>
+ The name of the mapper variation to load from apeconf.xml
+ files. See Products/Ape/doc/apexml.txt.
+ </description>
+ </key>
+ <key name="scan-interval" datatype="integer" default="10">
+ <description>
+ Cache scan interval in seconds. Set to 0 to disable scanning.
+ </description>
+ </key>
+
+ <!-- ZConfig refuses to let us extend the "zodb_db" section type,
+ therefore we have to duplicate. Grumble. The following is copied
+ from ZODB/component.xml and Zope/Startup/zopeschema.xml, with
+ package names expanded. -->
+
+ <section type="ZODB.storage" name="*" attribute="storage"/>
+ <key name="cache-size" datatype="integer" default="5000"/>
+ <key name="pool-size" datatype="integer" default="7"/>
+ <key name="version-pool-size" datatype="integer" default="3"/>
+ <key name="version-cache-size" datatype="integer" default="100"/>
+
+ <multikey name="mount-point" required="yes" attribute="mount_points"
+ datatype="Zope.Startup.datatypes.mount_point">
+ <description>
+ The mount point is the slash-separated path to which this database
+ will be mounted within the Zope application server.
+ </description>
+ </multikey>
+
+ <key name="connection-class"
+ datatype="Zope.Startup.datatypes.importable_name">
+ <description>
+ Change the connection class a database uses on a per-database basis to
+ support different connection policies. Use a Python dotted-path
+ name to specify the connection class.
+ </description>
+ </key>
+
+ <key name="class-factory" datatype="Zope.Startup.datatypes.importable_name"
+ default="DBTab.ClassFactories.autoClassFactory">
+ <description>
+ Change the class factory function a database uses on a
+ per-database basis to support different class factory policy.
+ Use a Python dotted-path name to specify the class factory function.
+ </description>
+ </key>
+
+ <key name="container-class"
+ datatype="Zope.Startup.datatypes.python_dotted_path">
+ <description>
+      Change the container class a (mounted) database uses on a
+ per-database basis to support a different container than a plain
+ Folder. Use a Python dotted-path name to specify the container class.
+ </description>
+ </key>
+
+ </sectiontype>
+
+</component>
+
Added: Products.Ape/trunk/datatypes.py
===================================================================
--- Products.Ape/trunk/datatypes.py (rev 0)
+++ Products.Ape/trunk/datatypes.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,81 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""ZConfig data types
+
+$Id$
+"""
+
+from ZODB.config import BaseConfig
+from Zope.Startup.datatypes import ZopeDatabase
+from apelib.zope2.mapper import load_conf
+from apelib.zodb3 import storage, db, resource
+
+
+class Storage(BaseConfig):
+
+ def open(self):
+ config = self.config
+ conns = {}
+ for c in config.connections:
+ conns[c.name] = c.open()
+ conf = load_conf(config.mapper_variation, search_products=1)
+ r = resource.StaticResource(conf)
+ return storage.ApeStorage(
+ conf_resource=r, connections=conns, name=self.name,
+ debug_conflicts=config.debug_conflicts)
+
+
+class Database(ZopeDatabase):
+
+ def createDB(self):
+ config = self.config
+ if config.mapper_variation:
+ conf = load_conf(config.mapper_variation, search_products=1)
+ r = resource.StaticResource(conf)
+ else:
+ r = None
+ s = config.storage.open()
+ kw = {}
+ for name in ('scan_interval', 'pool_size', 'cache_size',
+ 'version_pool_size', 'version_cache_size'):
+ if hasattr(config, name):
+ kw[name] = getattr(config, name)
+ d = db.ApeDB(storage=s, conf_resource=r, **kw)
+ return d
+
+
+def getParams(config):
+ kw = {}
+ for name in config.__dict__.keys():
+ if not name.startswith('_'):
+ kw[name] = getattr(config, name)
+ return kw
+
+
+class FSConnection(BaseConfig):
+
+ def open(self):
+ from apelib.fs.connection import FSConnection as impl
+ return impl(**getParams(self.config))
+
+
+class DBAPIConnection(BaseConfig):
+
+ def open(self):
+ c = self.config
+ return c.connection_class(
+ module_name=c.module_name,
+ connect_expression=c.connect_expression,
+ prefix=c.prefix,
+ )
Property changes on: Products.Ape/trunk/datatypes.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/doc/apexml.txt
===================================================================
--- Products.Ape/trunk/doc/apexml.txt (rev 0)
+++ Products.Ape/trunk/doc/apexml.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,269 @@
+
+
+Ape Configuration XML Files
+
+
+Ape configures mappers using configuration files. The standard Zope 2
+mapper configuration is in the file 'apeconf.xml' in the
+'apelib.zope2' package. Refer to the standard configuration file as
+an example.
+
+Ape lets you mix configurations from any number of configuration
+files, as long as none of the files contain conflicting directives.
+To add support for a new class to Ape, write your own 'apeconf.xml'
+rather than modify the standard configuration. If you're writing a
+Zope product, place your 'apeconf.xml' in your product directory. Ape
+will look for it and mix your configuration with the standard
+configuration.
+
+The Ape configuration schema is fairly simple. The root
+'configuration' tag contains component definitions and registrations.
+Variation tags are intermingled with the other configuration
+directives, allowing a configuration file to define multiple
+variations of the standard configuration.
+
+The schema uses two conventions that differ from XML norms. First,
+'variation' elements may appear anywhere child elements are allowed;
+see the description of the 'variation' element. Second, most
+attributes and child elements are optional, allowing minimal
+declarations.
+
+
+Elements
+
+
+ <configuration>
+ ...
+ </configuration>
+
+ The root element of an Ape configuration file. Uses no
+ attributes.
+
+
+
+ <variation name="...">
+ ...
+ </variation>
+
+ The variation tag signifies that all contained directives belong
+ to a variation rather than the standard configuration. A
+ variation element may appear anywhere child elements are allowed.
+
+ Variation tags let you specify multiple configuration variations
+ in a single file, keeping independent configurations together in a
+ logical way. Ape uses variations to keep 'apeconf.xml' clear
+ while providing alternative configurations.
+
+ The 'name' attribute is required. It specifies which variation
+ the child directives belong to. Placing many directives in a
+ single variation tag is equivalent to splitting those directives
+ into several variation tags of the same name.
+
+ Directives within a variation tag become part of a varied
+ configuration rather than the standard configuration. A
+ configuration file can modify any number of variations.
+ Directives outside any variation tag become part of the standard
+ configuration. When Ape loads a mapper, it specifies which
+ variation it needs, then the configuration machinery combines
+ directives from the variation with the standard directives.
+
+ Ape uses variations to configure both a SQL and filesystem mapper
+ in the same file. Before Ape used XML, it used three Python
+ modules to configure mappers: a base mapper, a filesystem
+ variation, and a SQL variation. The three separate files made it
+ difficult to understand how to configure a mapper, and in fact
+ introduced minor errors that went unnoticed for a long time. A
+ single XML file containing multiple variations turned out clearer
+ and shorter than equivalent Python code.
+
+
+
+ <mapper
+ name="..."
+ [ class="..." ]
+ [ extends="..." ] >
+ ...
+ </mapper>
+
+ Declares a mapper component. The 'name' attribute is required and
+ usually specifies a fully-qualified class name. The other
+ attributes are optional. A mapper element should be a direct
+ child of either a 'variation' or a 'configuration' element. A
+ mapper element may contain the following optional child elements:
+
+ serializer
+ gateway
+ variation
+
+ Ape mixes mapper configurations based on the mapper name. One
+ configuration file can define a mapper while another adds an extra
+ serializer, for example, as long as the two configurations do not
+ conflict.
+
+ The 'class' attribute specifies the class the mapper is to be used
+ for. If no 'class' attribute is associated with a mapper, the
+ mapper is abstract.
+
+ The 'extends' attribute tells the mapper to inherit components
+ from a base mapper. The sub-mapper will inherit gateways and
+ serializers. The derived mapper can override or disable inherited
+ serializers and gateways using a directive with a matched name.
+
+ Note that existence of a mapper does not imply to the system that
+ objects should be stored using that mapper. Use the 'store' and
+ 'load' directives to tell the system when to use which mappers.
+
+
+
+ <serializer
+ factory="..." | enabled="..."
+ [ name="..." ]
+ [ order="..." ] />
+
+ Declares a serializer. Either 'factory' or 'enabled' is required.
+ The 'name' and 'order' attributes are optional. The 'order'
+ attribute is valid only when the element descends from a mapper
+ element. This element accepts no child elements. Use this
+ element as a child of a mapper element.
+
+ Use a Python class to implement a serializer, then use the
+ 'factory' attribute to link your class into Ape. Specify a
+ fully-qualified class name, such as "mypackage.mymodule.myclass".
+ If your class constructor requires parameters, you may add them to
+ the end of the 'factory' attribute in parentheses. (Only
+ positional arguments are supported.)
+
+ If you don't specify a 'name' attribute, the serializer will be
+ used as the main serializer for the mapper. If you specify a
+ 'name' attribute, the serializer will be added to the mapper's
+ composite serializer.
+
+ Mappers can have any number of serializers. Sometimes the
+ ordering of the serializers is important. In those rare cases,
+ use the 'order' attribute to specify a sort key. Use 'a' to make
+ it run first and 'z' to make it run last. The default sort key is
+ 'middle'.
+
+ The 'enabled' attribute lets you disable an unwanted inherited
+ serializer. To do this, set the 'enabled' attribute to 'false'
+ and match the name of the inherited serializer.
+
+
+
+ <gateway
+ factory="..." | enabled="..."
+ [ name="..." ] />
+
+ Declares a gateway. Either 'factory' or 'enabled' is required.
+ The 'name' attribute is optional. This element accepts no child
+ elements. Use this element as a child of a mapper element.
+
+ Use a Python class to implement a gateway, then use the 'factory'
+ attribute to link your class into Ape. Specify a fully-qualified
+ class name, such as "mypackage.mymodule.myclass". If your class
+ constructor requires parameters, you may add them to the end of
+ the 'factory' attribute in parentheses. (Only positional
+ arguments are supported.)
+
+ If you don't specify a 'name' attribute, the gateway will be used
+ as the main gateway for the mapper. If you specify a 'name'
+ attribute, the gateway will be added to the mapper's composite
+ gateway.
+
+ Mappers can have any number of gateways. The order in which
+ gateways are used should not matter.
+
+ The 'enabled' attribute lets you disable an unwanted inherited
+ gateway. To do this, set the 'enabled' attribute to 'false' and
+ match the name of the inherited gateway.
+
+
+
+ <classifier
+ factory="..." />
+
+ <oid-generator
+ factory="..." />
+
+ Declares the classifier or OID generator component. The 'factory'
+ attribute is required. The oid-generator element accepts no child
+ elements, but the classifier element accepts a gateway element,
+ since classifiers usually need a gateway for loading and storing
+ classification data.
+
+ Use a Python class to implement a new classifier or OID generator,
+ then use the 'factory' attribute to link your class into Ape.
+ Specify a fully-qualified class name. If your class constructor
+ requires parameters, you may add them to the end of the 'factory'
+ attribute in parentheses. (Only positional arguments are
+ supported.)
+
+
+ <store
+ class="..." | exact-class="..."
+ using="..."
+ [default-extension="..." | default-extension-source="..." ] />
+
+ The 'store' directive creates a rule telling the classifier which
+ mapper to use when storing instances of a particular class. It
+ accepts no child elements.
+
+ The 'class' or 'exact-class' attribute specifies which class the
+ directive applies to. Only one of the two attributes is allowed.
+ If 'class' is used, the directive also applies to subclasses; if
+ 'exact-class' is used instead, the directive applies only to
+ instances of exactly the class specified. The 'using' attribute
+ says which mapper to use for storing matched instances.
+
+ The optional 'default-extension' attribute provides a filename
+ extension to add automatically to objects stored on the filesystem
+ (if the object does not yet have an extension.) The alternative
+ 'default-extension-source' tells the classifier how to compute the
+ extension. The only currently implemented value for
+ 'default-extension-source' is 'content_type', which means that the
+ classifier should read the object's 'content_type' attribute,
+ interpret it as a mime type (i.e. 'text/plain'), and translate
+ that to a filename extension. This strategy works well enough for
+ Zope's image and file objects.
+
+
+
+ <load
+ extensions="..." | generic="..." | mapper-name="..."
+ using="..." />
+
+ The 'load' directive creates a rule telling the classifier which
+ mapper to use when loading data that has not already been
+ classified with a mapper name. It accepts no child elements.
+
+ Exactly one of 'extensions', 'generic', or 'mapper-name' is
+ required. The required 'using' attribute says which mapper to use
+ for loading matched instances.
+
+ The 'extensions' attribute specifies filename extensions that
+ trigger this rule. Separate the extensions with a space.
+
+ The 'generic' attribute tells the classifier to use this mapper in
+ certain generic situations. The allowable values for the
+ 'generic' attribute vary by classifier, but the Zope 2 classifier
+ recognizes the following values:
+
+ - 'directory': Use this mapper for filesystem directories if no
+ other rule matches.
+
+ - 'file': Use this mapper for filesystem files if no other
+ rule matches.
+
+ - 'basepath': Use this mapper when loading the object at the
+ base path on the filesystem. The default configuration uses
+ this directive for the Zope 2 Application object.
+
+ - 'root': Use this mapper for the database root object, which
+ usually has OID "0".
+
+ The 'mapper-name' attribute sets up a mapper name alias. Over
+ time, the names of mappers may change while object instances
+ continue to use the old names. A mapper name alias alleviates
+ this problem by telling the classifier to load old instances using
+ a new mapper name.
+
Property changes on: Products.Ape/trunk/doc/apexml.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/doc/outline.txt
===================================================================
--- Products.Ape/trunk/doc/outline.txt (rev 0)
+++ Products.Ape/trunk/doc/outline.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,389 @@
+
+Ape Documentation Outline
+
+
+I. Purpose of Ape
+
+ A. Differences Between Object-Oriented and Relational Databases
+
+ The differences between relational databases and object-oriented
+ databases lie in their flexibility. To store data in an RDBMS, you
+ must first define the complete structure of your data. For
+ example, if you wanted to store phone numbers, you would first
+ create a table. Then in that table you would set up a few columns
+ including "name" and "phone_number". You would then write a
+ program that can interact with those specific columns. If you
+ later decide you also want to store people's email addresses, you
+ have to add another column and change your program as well.
+
+ Storing data in an OODBMS does not require defining the structure
+ ahead of time. You only have to write your program, then connect
+ your program to the OODBMS with a few instructions, and you're
+ finished. The OODBMS takes advantage of the structures you use
+ naturally when creating your program, and it simply stores the
+ structures. It is often faster and easier to write a program for
+ an OODBMS than for an RDBMS.
+
+ However, RDBMSs are very popular. Major vendors like Oracle,
+ Sybase, IBM, Borland, and others, all sell RDBMS software.
+ Computer science courses in practically every university teach
+ development and administration of RDBMS-based software. RDBMSs
+ have certain advantages derived from their mathematical
+ foundations, such as the ability to search for data based on
+ previously unanticipated criteria. Also, years of competition in
+ the RDBMS market have led to refinements in reliability and
+ scalability.
+
+ B. ZODB
+
+ One of the great strengths of Zope, a Python web application
+ server, is its database technology called ZODB. ZODB is a Python
+ object-oriented database. Software development using ZODB is fast
+ and easy. When you write software based on ZODB, you can
+ generally pretend that your program never stops, never crashes,
+ and never has to write anything to disk. ZODB takes care of the
+ remaining details.
+
+ However, there are many good reasons to use a relational database
+ instead of ZODB. People are already familiar with relational
+ databases. ZODB is only accessible through the Python programming
+ language, while relational databases are more language-neutral.
+ Relational databases can more easily adapt to unexpected
+ requirements. And because they have been around longer,
+ relational databases can often hold more data, read and write data
+ faster, and maintain full-time operation better than ZODB
+ storages.
+
+ C. Bridging the Gap
+
+ For a long time, people have requested better relational
+ integration in Zope. Zope has limited relational integration: you
+ can open connections to an RDBMS and store and retrieve data,
+ including objects. But objects from the RDBMS never reach
+ "first-class citizenship" in Zope. Zope does not allow you to
+ manipulate these objects as easily as you can work with objects
+ stored in ZODB.
+
+ There are backends for ZODB that let you store pickled objects in
+ relational databases. This solution satisfies those who need to
+ store large amounts of data, but the data is stored in a special
+ Python-only format. It prevents developers from taking full
+ advantage of relational data storage and locks out other
+ programming languages.
+
+ Ape bridges the gap between ZODB and relational data storage.
+ It lets developers store ZODB objects in arbitrary databases and
+ arbitrary formats, without changing application code. It combines
+ the advantages of orthogonal persistence with relational storage.
+
+ D. Current Limitations
+
+ To facilitate distribution, Ape is currently a Zope product. This
+ makes it difficult to reuse outside Zope. But work is underway to
+ separate it from Zope, starting with the creation of a top-level
+ Python package called apelib.
+
+II. Components
+
+ A lot of the names used in Ape are based on Martin Fowler's book
+ "Patterns of Enterprise Application Architecture".
+
+ There are many kinds of components in Ape, but to store new kinds of
+ objects or store in new formats, you generally only need to write
+ components that implement one of two interfaces: ISerializer and
+ IGateway. This document focuses on these two kinds of components.
+
+ A. Mappers
+
+ Ape uses a set of mappers to map objects to databases. Mappers
+ are components that implement a simple interface. Mappers
+ serialize, deserialize, store, and load objects. Mappers and
+ their associated components are reusable for many applications
+ needing to store and load objects, but the framework is especially
+ designed for mapping persistent object systems like ZODB.
+
+ Most mappers are responsible for loading and storing instances of
+ one class. Mappers separate serialization from storage, making it
+ possible to reuse serializers with many storage backends. A
+ mapper supplies a serializer, which extracts and installs object
+ state, and a gateway, which stores and retrieves state in
+ persistent storage.
+
+ B. Basic Sequence
+
+ To load an object, Ape requests that the gateway of a specific
+ mapper load data. The gateway queries the database and returns a
+ result. Gateways may delegate the request to multiple simple
+ gateways. A gateway that delegates in this manner is called a
+ composite gateway. The composite gateway combines the results
+ into a dictionary that maps gateway names to the results from the
+ data store.
+
+ Then Ape feeds that result to the mapper's serializer, which may
+ exist on a separate machine from the gateway. The serializer
+ installs the loaded data into the object being deserialized. Like
+ gateways, serializers can also be organized as a composite
+ serializer delegating to multiple simple serializers. Finally,
+ control returns to the application.
+
+ When storing objects, the system uses the same components, but in
+ reverse order. The serializer reads the object and the results
+ are fed to the gateway, which stores the data.
+
+ ZODB is the key to loading and storing objects at the right time.
+ The Persistent base class notifies Ape when an object needs to be
+ loaded or stored.
+
+ C. Schemas
+
+ Schemas define the format of the data passed between serializers
+ and gateways. Ape defines three basic schema classes and
+ allows you to use other kinds of schemas.
+
+ A FieldSchema declares that the data passed is a single field,
+ such as a string or integer. FieldSchema is appropriate when
+ serializing data of a simple type. When using a FieldSchema, the
+ state passed between serializers and gateways is the raw data.
+
+ A RowSchema declares a list of fields. RowSchema is appropriate
+ when serializing multiple fields. When using a RowSchema, the
+ state passed between serializers and gateways is a tuple of
+ values.
+
+ A RowSequenceSchema declares a list of rows of fields.
+ RowSequenceSchema is appropriate when serializing multiple rows of
+ fields at once. When using a RowSequenceSchema, the state passed
+ between serializers and gateways is a sequence of tuples.
+
+ The only requirement Ape makes of schemas is that they
+ implement the Python equality operation (__eq__), allowing the
+ system to verify that serializers and gateways are compatible.
+ You can use many kinds of Python objects as schemas.
+
+ D. Gateways
+
+ Gateways load and store serialized state. The gateways you create
+ can store data anywhere and in any format, as long as you obey a
+ few simple rules.
+
+ The state returned by the gateway's load() method must conform to
+ the schema declared by the gateway. Conversely, the gateway can
+ expect the state passed to the store() method to conform to that
+ same schema.
+
+ The gateway must generate a hash of the stored state, allowing the
+ system to detect transaction conflicts. The hash is returned by
+ both the load() and store() methods. Hashes don't need to be
+ integers, but must be hashable as defined by Python's hash()
+ function.
+
+ E. Serializers
+
+ Serializers do the work of both pulling data out of an object and
+ pushing data into it. The serialize() method reads the internal
+ state of an object without changing the object. The deserialize()
+ method installs state into an object.
+
+ Proper serialization must answer certain questions. To answer
+ these questions, serializers receive event objects as arguments to
+ the serialize() and deserialize() methods. By interacting with
+ the events, the serializer affects the serialization and
+ deserialization processes to achieve the proper behavior.
+
+ 1. What if the serializer forgets to store an attribute?
+
+ To avoid forgetting attributes, serializers indicate to the
+ serialization event which attributes and subobjects they
+ serialized by calling the serialized() or ignore() method. (The
+ difference between the two methods will be explained in a
+ moment.) At the end of serialization, a final serializer may
+ look for any remaining attributes. If there are any attributes
+ left over, the final serializer may choose to either put the
+ rest of the attributes in a pickle or raise an exception
+ indicating which attributes were forgotten.
+
+ 2. What if two attributes refer to the same subobject under
+ different attribute names? In general, what if an object refers
+ to a subobject in more than one way?
+
+ Referring to a subobject in more ways than one is usually not a
+ problem. If one serializer serializes both references, that
+ serializer can deal with the issue in its own way. The more
+ interesting problem is that a serializer may serialize only one
+ of the references, leaving the other to be serialized by the
+ remainder pickle. If you're not careful, the remainder pickle
+ could generate a second copy of the subobject upon
+ deserialization.
+
+ To deal with this, serializers call the serialized() method
+ rather than the ignore() method. The serialized() method
+ provides the information needed by the final serializer to
+ restore references to the correct subobject. For this to work,
+ serializers also need to call deserialized() in their
+ deserialize() method, so that the unpickler knows exactly what
+ subobject to refer to.
+
+ 3. Is it possible to avoid loading the whole database into RAM
+ when deserializing? Conversely, after making a change, is it
+ possible to serialize the state of only the part of the object
+ system that has changed?
+
+ Working with only a part of the object system is one of the core
+ features provided by ZODB. ZODB assigns an object ID to each
+ persistent object to match objects with database records. When
+ you load a persistent object, ZODB loads the full state of only
+ the object you need, and when you change a persistent object,
+ ZODB stores only the corresponding database record.
+
+ During serialization, serializers use three methods of the
+ serialization event to make references to other database
+ records. Serializers first call identify() to find out if the
+ subobject is already stored in the database. If it isn't, the
+ serializer should call new_oid() to generate an identity for the
+ new subobject. In either case, the serializer then calls
+ referenced() to tell the event that it is storing a reference to
+ another database record.
+
+ During deserialization, serializers can use the resolve() method
+ of the deserialization event to refer to objects from other
+ database records without loading the full state of the objects.
+ The returned subobject may be in a "ghosted" state, meaning that
+ it temporarily has no attributes. (When you attempt to access
+ any attribute of a ghosted object, ZODB transparently loads the
+ object before looking for the attribute.)
+
+ 4. What if the record boundaries set up by the serializer don't
+ correspond directly with ZODB objects?
+
+ ZODB makes an assumption that isn't always valid in Ape: ZODB
+ assumes that objects that derive from the Persistent base class
+ are database record boundaries. In Ape, however, sometimes it
+ makes sense to serialize several Persistent objects in a single
+ database record.
+
+ In Ape, when you serialize more than one Persistent object in a
+ single record, you create what are called "unmanaged" persistent
+ objects or "UPOs". If the serializer forgets to tell Ape about
+ the UPOs, ZODB will not see changes made to them and
+ transactions involving changes to those objects may be
+ incomplete. So during both serialization and deserialization,
+ it is important for ZODB-aware serializers to append to the
+ event's 'upos' attribute.
+
+ Ape provides some useful standard serializers:
+
+ - The remainder serializer pickles and restores all the
+ attributes not stored by other serializers. This is useful for
+ development and simplifies the tree of mappers.
+
+ - The roll call serializer verifies that every attribute of an
+ object was serialized. If any are forgotten, it raises an
+ exception. This is useful when you don't want to use a
+ remainder serializer, but you don't want to lose any attributes
+ either. The roll call serializer stores nothing, so it does not
+ need to be paired with a gateway.
+
+ - The optional serializer is a decorator for a real serializer.
+ The optional serializer asks the real serializer if it is able
+ to serialize or deserialize an object (using the canSerialize()
+ method). If the test fails, the optional serializer ignores the
+ failure and falls back to a default.
+
+ Serializers access the innards of objects, often breaking
+ encapsulation because the serializers need to know exactly what
+ attributes the objects use. To avoid breaking encapsulation, you
+ can implement parts of the (de)serialization process in the
+ serializable objects.
+
+ F. Classifiers
+
+ Classifiers are the components that choose which mapper to use for
+ an object or database record. There is one classifier per object
+ database. Classifiers can be simple, always using a specific
+ mapper for specific OIDs or storing the name of the mapper in the
+ database. Classifiers can also be complex, using attributes or
+ metadata to make the choice of mapper.
+
+ Ape consults the classifier before loading or storing any object.
+ The standard classifier Ape uses for Zope 2 object databases is
+ fairly complex, involving meta_types, filename extensions, and
+ class names. There is also a simpler classifier in apelib.core.
+
+ Classifiers work with "classifications". Classifications are
+ dictionaries mapping strings to strings. Classifications contain
+ information that might be useful for choosing object and database
+ record types. Unlike the rest of the state of an object,
+ classifications do not need to be precise.
+
+ When loading an object, Ape calls the classifier's classifyState()
+ method. The classifier may choose to load information from the
+ database to discover the type of database record. It usually does
+ this using a gateway private to the classifier. classifyState()
+ returns a classification.
+
+ When storing an object, Ape calls the classifier's
+ classifyObject() method. The classifier may choose to examine the
+ object or it may know enough just by the keychain assigned to the
+ object. classifyObject() returns a classification and
+ mapper_name, but it should not store the generated classification
+ yet. Ape later calls the store() method of the classifier, at
+ which point the classifier has the option of storing the
+ classification. (This separation exists so that serialization and
+ data storage can occur on different machines.)
+
+
+III. Example: Mapping Zope 2
+
+ The 'apeconf.xml' file in 'apelib.zope2' provides two variations of
+ a Zope 2 mapper configuration. To extend the Zope 2 mappers with
+ your own mappers, write an apeconf.xml file and place it in your
+ Zope 2 product. One maps to the filesystem and the other maps to a
+ SQL database. See apexml.txt for details on apeconf.xml.
+
+ The SQL mapper uses a Python DB-API 2.0 module to connect to the
+ database. It uses integers as keys and puts information about each
+ object in several tables. All objects have an entry in the
+ classification table. The SQL mapper uses a simple schema, but Ape
+ is not limited to this schema.
+
+ The filesystem mapper stores data in a directory and its
+ subdirectories. It annotates files and directories using
+ ".properties" files. The filesystem mapper both recognizes and
+ generates filename extensions.
+
+ Normally, ZODB caches objects indefinitely. This leads to excellent
+ performance, but in the case of Ape, it prevents the object system
+ from having the most current data all the time. One workaround is
+ to set the ZODB cache size to zero, forcing ZODB to clear its cache
+ after every transaction, but that solution eliminates the ZODB
+ performance advantage.
+
+ Ape has a new solution to the cache invalidation problem. Ape keeps
+ a record of which OIDs are in use and correlates them with
+ filesystem paths and last-modified times. Then it periodically
+ scans those paths, invalidating OIDs if it sees changes. The
+ solution could potentially work even better with RDBMSs, since the
+ periodic scan could be implemented using only a single query.
+
+
+
+IV. Other ways to use the framework
+
+ ZEO: Ape separates serialization from data storage, making it
+ possible to perform serialization on a ZEO client while data storage
+ happens in a ZEO server. Ape has been successfully tested with ZEO
+ 3.2.
+
+ Zope 3: Ape is currently designed with Zope 2 in mind, but is meant
+ to be reusable for Zope 3. A new set of mappers will be needed, but
+ nearly all of the interfaces should remain unchanged.
+
+ Non-Zope applications: Ape is a distinct library useful for many
+ ZODB applications. Ape makes it easier to map objects to any
+ data store.
+
+ Finally, the framework is useful for many purposes outside ZODB.
+ Once you have built a system of mappers, you can use those mappers
+ to import and export objects, synchronize with a data store, and
+ apply version control to your objects. The concepts behind Ape
+ open exciting possibilities.
Property changes on: Products.Ape/trunk/doc/outline.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/doc/tutorial_slides.sxi
===================================================================
(Binary files differ)
Property changes on: Products.Ape/trunk/doc/tutorial_slides.sxi
___________________________________________________________________
Name: svn:mime-type
+ application/octet-stream
Added: Products.Ape/trunk/lib/apelib/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,22 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Apelib: Adaptable Persistence Engine library.
+
+Pattern names derived from Martin Fowler's Enterprise Application
+Architecture book.
+
+http://www.martinfowler.com/eaaCatalog/
+
+$Id$
+"""
Property changes on: Products.Ape/trunk/lib/apelib/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/config/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/config/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/config/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,17 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Ape configuration package.
+
+$Id$
+"""
Property changes on: Products.Ape/trunk/lib/apelib/config/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/config/apeconf.py
===================================================================
--- Products.Ape/trunk/lib/apelib/config/apeconf.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/config/apeconf.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,417 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Ape configuration assembler.
+
+$Id$
+"""
+from __future__ import nested_scopes
+from types import TupleType
+
+from apelib.core.mapper import Mapper, MapperConfiguration
+from apelib.core.serializers import CompositeSerializer
+from apelib.core.gateways import CompositeGateway
+from apelib.core.interfaces import IDatabaseInitializer, ConfigurationError
+
+from apelib.zodb3.zodbtables import Table, TableSchema
+from common import Directive, DirectiveReader, ComponentSystem
+
+
class MapperDefinition(Directive):
    """Directive declaring a mapper, optionally extending another mapper."""
    schema = TableSchema()
    schema.add('mapper_name', primary=1)
    schema.add('extends')     # name of the base mapper, or ''
    schema.add('class_name')  # dotted class name the mapper handles, or ''
+
+
class ComponentDefinition(Directive):
    """Directive declaring a standalone (non-mapper) component."""
    schema = TableSchema()
    # comptypes: 'classifier', 'oid_generator'
    schema.add('comptype', primary=1)
    schema.add('name', primary=1)
    schema.add('producer')  # callable that builds the component
+
+
class MapperComponent(Directive):
    """Directive attaching a sub-component to a mapper."""
    schema = TableSchema()
    schema.add('mapper_name', primary=1)
    # comptypes: 'serializer', 'gateway'
    schema.add('comptype', primary=1)
    schema.add('name', primary=1)
    schema.add('producer')  # callable that builds the sub-component
    schema.add('order')     # lexical sort hint; defaults to 'middle'
+
+
class StoreUsing(Directive):
    """Directive routing instances of a class to a mapper when storing."""
    schema = TableSchema()
    schema.add('class_name', primary=1)
    schema.add('use_mapper')
    schema.add('exact')  # boolean; true when declared via 'exact-class'
    schema.add('default_extension')
    schema.add('default_extension_source')
+
+
class LoadUsing(Directive):
    """Directive choosing a mapper at load time by a matching criterion."""
    schema = TableSchema()
    # Criterion is 'extension', 'mapper-name', or 'generic'
    schema.add('criterion', primary=1)
    schema.add('value', primary=1)
    schema.add('use_mapper')
+
+
class DisabledProducer:
    """Producer standing in for a component disabled in the configuration.

    Remembers where the directive came from, and always produces None.
    """

    def __init__(self, source):
        # Origin of the directive, kept for error reporting.
        self.source = source

    def __call__(self, compsys):
        # A disabled component yields no object.
        return None
+
+
class FactoryProducer:
    """Producer that instantiates a class named by a factory spec.

    The spec has the form 'module.path.ClassName', optionally followed
    by constructor arguments written as a Python expression, e.g.
    'mod.Klass(1, "a")'.  Calling the producer imports the module,
    looks up the class, and instantiates it with the parsed arguments
    plus (last) the product of the optional sub-producer.

    Raises ValueError if the spec lacks a module prefix, and
    ImportError if the class is missing from the module.
    """

    def __init__(self, source, factory):
        self.source = source
        pos = factory.find('(')
        if pos >= 0:
            # Arguments specified.  Interpret as a Python expression.
            # NOTE(security): eval() of configuration text -- acceptable
            # only because configuration files are trusted input.
            args = eval(factory[pos:])
            if not isinstance(args, tuple):
                args = (args,)
            factory = factory[:pos]
        else:
            args = ()
        pos = factory.rfind('.')
        if pos <= 0:
            raise ValueError("Module and class name required")
        self.module_name = factory[:pos]
        self.class_name = factory[pos + 1:]
        self.args = args
        self.sub_producer = None

    def __call__(self, compsys):
        """Imports the class and returns a new instance of it."""
        params = []
        if self.args:
            params.extend(self.args)
        if self.sub_producer is not None:
            # The sub-producer's product becomes the last positional arg.
            obj = self.sub_producer(compsys)
            params.append(obj)
        m = __import__(self.module_name, {}, {}, ('__doc__',))
        try:
            c = getattr(m, self.class_name)
        except AttributeError:
            raise ImportError("No class %s in module %s" % (
                self.class_name, self.module_name))
        return c(*params)
+
+
def make_producer(source, comptype, attrs, raise_exc=1):
    """Creates a producer from an XML element's attributes.

    Returns a DisabledProducer when the element says enabled="false",
    a FactoryProducer when a 'factory' attribute is present, and
    otherwise raises ValueError -- or returns None when raise_exc is
    false.
    """
    if attrs.get('enabled', '').lower() == 'false':
        return DisabledProducer(source)
    elif 'factory' in attrs:
        return FactoryProducer(source, attrs['factory'])
    elif raise_exc:
        raise ValueError("Need a 'factory' or 'enabled' attribute")
    else:
        return None
+
+
def get_element_handlers():
    """Returns a dictionary of XML element handlers.

    Each handler has the signature handler(source, vars, attrs):
    'source' is a (system_id, line_number) pair, 'vars' is the
    per-element variable scope maintained by the XML reader, and
    'attrs' holds the element's XML attributes.  Handlers append
    Directive instances to vars['directives'].
    """

    def handle_configuration(source, vars, attrs):
        # Root element; the reader must have set up the default scope.
        assert vars.has_key('directives')

    def handle_variation(source, vars, attrs):
        # Redirect directives nested in this element into a named
        # variation list, creating the list on first use.
        variations = vars['variations']
        name = attrs['name']
        d = variations.get(name)
        if d is None:
            # Make a new variation.
            d = []
            variations[name] = d
        vars['directives'] = d

    def handle_mapper(source, vars, attrs):
        d = vars['directives']
        mapper_name = str(attrs['name'])
        extends = str(attrs.get('extends', ''))
        class_name = str(attrs.get('class', ''))
        # Remember the mapper name so nested components attach to it.
        vars['mapper_name'] = mapper_name
        d.append(MapperDefinition(source, mapper_name, extends, class_name))

    def handle_mapper_component(source, vars, attrs, comptype):
        # Shared logic for <serializer> and <gateway> inside <mapper>.
        d = vars['directives']
        producer = make_producer(source, comptype, attrs)
        mapper_name = vars.get('mapper_name')
        if mapper_name is None:
            raise ValueError('Not inside a mapper tag')
        else:
            # Composite component of a mapper
            name = attrs.get('name', '')
            directive = MapperComponent(
                source, mapper_name, comptype,
                name, producer, attrs.get('order', 'middle'))
            d.append(directive)
        return producer

    def handle_serializer(source, vars, attrs):
        handle_mapper_component(source, vars, attrs, 'serializer')

    def handle_gateway(source, vars, attrs):
        p = vars.get('classifier_producer')
        if p is not None:
            # Set a gateway for a classifier.
            if not hasattr(p, 'sub_producer'):
                raise ValueError(
                    "Classifier at %s needs a factory" % source)
            if p.sub_producer is not None:
                raise ValueError(
                    "Multiple gateways in classifiers not allowed at %s" %
                    source)
            p.sub_producer = make_producer(source, 'gateway', attrs)
        else:
            handle_mapper_component(source, vars, attrs, 'gateway')

    def handle_classifier(source, vars, attrs):
        d = vars['directives']
        producer = make_producer(source, 'classifier', attrs)
        directive = ComponentDefinition(source, 'classifier', '', producer)
        d.append(directive)
        # Expose the producer so a nested <gateway> can attach to it.
        vars['classifier_producer'] = producer

    def handle_oid_generator(source, vars, attrs):
        d = vars['directives']
        producer = make_producer(source, 'oid_generator', attrs)
        directive = ComponentDefinition(source, 'oid_generator', '', producer)
        d.append(directive)

    def handle_store(source, vars, attrs):
        # Exactly one of 'class' / 'exact-class' must be given.
        d = vars['directives']
        cn = attrs.get('class')
        ecn = attrs.get('exact-class')
        if cn and ecn or not cn and not ecn:
            raise ValueError("One of 'class' or 'exact-class' is required")
        mapper_name = str(attrs['using'])
        def_ext = attrs.get('default-extension')
        def_ext_src = attrs.get('default-extension-source')
        if def_ext and def_ext_src:
            raise ValueError(
                "Only one of 'default-extension' "
                "or 'default-extension-source' is allowed")
        directive = StoreUsing(
            source, cn or ecn, mapper_name, bool(ecn), def_ext, def_ext_src)
        d.append(directive)

    def handle_load(source, vars, attrs):
        d = vars['directives']
        mapper_name = str(attrs['using'])
        criterion = None
        for attr in ('mapper-name', 'extensions', 'generic'):
            if attrs.has_key(attr):
                if criterion is not None:
                    raise ValueError("Only one criterion allowed")
                criterion = attr
                v = attrs[attr]
                if attr == 'extensions':
                    first = 1  # NOTE(review): unused leftover variable
                    # Normalize each extension to a lowercased '.ext'.
                    for ext in v.split():
                        if not ext.startswith('.'):
                            ext = '.' + ext
                        ext = ext.lower()
                        d.append(LoadUsing(
                            source, 'extension', ext, mapper_name))
                else:
                    d.append(LoadUsing(
                        source, attr, v, mapper_name))

    handlers = {
        'configuration': handle_configuration,
        'variation': handle_variation,
        'mapper': handle_mapper,
        'serializer': handle_serializer,
        'gateway': handle_gateway,
        'classifier': handle_classifier,
        'oid-generator': handle_oid_generator,
        'store': handle_store,
        'load': handle_load,
    }

    return handlers
+
+
+
class BasicComponentAssembler:
    """Assembler for producer-based components.

    Configures at the time of creation.
    """

    def __init__(self, compsys, comptype, name):
        self.compsys = compsys
        # Look up the single directive that declares this component.
        records = compsys.dtables.query(
            ComponentDefinition, comptype=comptype, name=name)
        if not records:
            raise ConfigurationError("No %s component named %s exists"
                                     % (comptype, repr(name)))
        assert len(records) == 1
        producer = records[0]['producer']
        self.producer = producer

    def create(self):
        """Instantiates the component by calling its producer."""
        self.obj = self.producer(self.compsys)
        return self.obj

    def configure(self):
        # Basic components need no post-creation configuration.
        pass
+
+
class MapperAssembler:
    """Assembler for one mapper component.

    Gathers the mapper's directive, its inherited sub-components, and
    wires up the serializer, gateway and initializers on configure().
    """
    def __init__(self, compsys, comptype, name):
        self.compsys = compsys
        dtables = compsys.dtables
        self.mapper_name = name
        recs = dtables.query(MapperDefinition, mapper_name=name)
        if not recs:
            raise ConfigurationError("No mapper named %s exists" % repr(name))
        self.directive = recs[0]
        self.subobjs = []  # all subobjects
        self._prepare_sub_components()

    def _prepare_sub_components(self):
        """Populates multi_comps with components to be used in this mapper.

        Walks up the 'extends' chain so inherited serializer/gateway
        declarations are included; setdefault() lets the more derived
        mapper win on name collisions.  Raises ConfigurationError on
        extension cycles.
        """
        self.multi_comps = {}  # comptype -> name -> record
        dtables = self.compsys.dtables
        name = self.mapper_name
        all_names = []  # mapper_name and all of its base mapper_names
        while name:
            all_names.append(name)
            records = dtables.query(
                MapperComponent, mapper_name=name)
            for r in records:
                d = self.multi_comps.setdefault(r.comptype, {})
                d.setdefault(r.name, r)
            name = dtables.query_field(
                MapperDefinition, 'extends', mapper_name=name)
            if name and name in all_names:
                raise ConfigurationError(
                    "Circular extension in mappers %s" % repr(all_names))

    def create(self):
        """Creates the (not yet configured) Mapper instance."""
        self.obj = Mapper()
        return self.obj

    def configure(self):
        """Attaches serializers, gateways and initializers to the mapper."""
        self.obj.name = self.mapper_name
        self.obj.class_name = self.directive.class_name or ''
        self.add_serializers()
        self.add_gateways()
        self.add_initializers()

    def add_serializers(self):
        """Builds the mapper's serializer from the declared components."""
        d = self.multi_comps.get('serializer', {})

        # Create the main serializer
        r = d.get('')
        if r:
            s = r.producer(self.compsys)
        else:
            s = CompositeSerializer()

        # Create the contained serializers, sorted lexically by their
        # 'order' attribute, then by name.
        ordered = [
            ((r.order or '').lower(), name, r)
            for name, r in d.items() if name]
        ordered.sort()
        for order, name, r in ordered:
            o = r.producer(self.compsys)
            if o is not None:
                s.add(str(name), o)
                self.subobjs.append(o)

        # Assign it
        self.obj.serializer = s

    def add_gateways(self):
        """Builds the mapper's gateway from the declared components."""
        d = self.multi_comps.get('gateway', {})

        # Create the main gateway
        r = d.get('')
        if r:
            g = r.producer(self.compsys)
        else:
            g = CompositeGateway()

        # Create the contained gateways
        for name, r in d.items():
            if name:
                o = r.producer(self.compsys)
                if o is not None:
                    g.add(str(name), o)
                    self.subobjs.append(o)

        # Assign it
        self.obj.gateway = g

    def add_initializers(self):
        """Registers subobjects that ask for database-init events."""
        for o in self.subobjs:
            if IDatabaseInitializer.isImplementedBy(o):
                self.obj.initializers.append(o)
+
+
class ClassifierAssembler (BasicComponentAssembler):
    """Assembler for one classifier.

    There is only one classifier per configuration, so the name must
    be empty.
    """
    def __init__(self, compsys, comptype, name):
        assert comptype == "classifier", comptype
        assert name == '', name
        BasicComponentAssembler.__init__(self, compsys, comptype, name)

    def configure(self):
        """Feeds all declared store and load rules to the classifier."""
        dtables = self.compsys.dtables
        for r in dtables.query(StoreUsing):
            self.obj.add_store_rule(
                r.class_name, r.use_mapper, r.exact,
                r.default_extension, r.default_extension_source)
        for r in dtables.query(LoadUsing):
            self.obj.add_load_rule(r.criterion, r.value, r.use_mapper)
+
+
def configure(filenames, vname=''):
    """Returns a MapperConfiguration built from configuration files.

    'filenames' is a sequence of XML configuration file names; 'vname'
    optionally selects a configuration variation.  Assembles all
    declared mappers plus the classifier and OID generator, collects
    database initializers, and checks the resulting configuration.
    """
    handlers = get_element_handlers()
    reader = DirectiveReader(handlers)
    for fn in filenames:
        reader.read(fn)
    directives = reader.get_directives(vname)
    cs = ComponentSystem(directives)
    cs.add_component_type('mapper', MapperAssembler)
    cs.add_component_type('classifier', ClassifierAssembler)
    cs.add_component_type('oid_generator', BasicComponentAssembler)
    mappers = {}
    for record in cs.dtables.query(MapperDefinition):
        name = record.mapper_name
        mappers[name] = cs.get('mapper', name)
    classifier = cs.get('classifier', '')
    oid_gen = cs.get('oid_generator', '')
    conf = MapperConfiguration(mappers, classifier, oid_gen)
    # The classifier's gateway and the OID generator may also need
    # database initialization.
    for obj in (classifier.gateway, oid_gen):
        if IDatabaseInitializer.isImplementedBy(obj):
            conf.initializers.append(obj)
    conf.check()
    return conf
+
Property changes on: Products.Ape/trunk/lib/apelib/config/apeconf.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/config/common.py
===================================================================
--- Products.Ape/trunk/lib/apelib/config/common.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/config/common.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,200 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Bits useful for configuration. May move to its own package.
+
+$Id$
+"""
+
+import xml.sax.handler
+from xml.sax import parse
+
+from apelib.zodb3 import zodbtables
+
+
class Directive:
    """Abstract base class for table-oriented directives.

    Subclasses set 'schema' to a table schema; constructor arguments
    (positional or keyword) fill the schema's columns.
    """

    schema = None  # override

    def __init__(self, source, *args, **kw):
        # 'source' identifies where the directive came from, usually a
        # (system_id, line_number) pair.
        self.source = source
        if args:
            # Map positional arguments onto schema columns, in order.
            columns = self.schema.get_columns()
            for n in range(len(args)):
                key = columns[n].name
                if kw.has_key(key):
                    raise TypeError(
                        '%s supplied as both positional and keyword argument'
                        % repr(key))
                kw[key] = args[n]
        self.data = kw
        # The unique key is the directive class plus all primary-key
        # column values; it is used to detect conflicting directives.
        unique_key = [self.__class__]
        for column in self.schema.columns:
            if column.primary:
                unique_key.append(kw[column.name])
        self.unique_key = tuple(unique_key)

    def get_unique_key(self):
        """Returns the hashable key identifying this directive."""
        return self.unique_key

    def index(self, tables):
        """Inserts this directive's data into its per-class table.

        'tables' maps directive class -> zodbtables.Table; the table
        is created on first use.
        """
        t = tables.get(self.__class__)
        if t is None:
            t = zodbtables.Table(self.schema)
            tables[self.__class__] = t
        t.insert(self.data)

    def __eq__(self, other):
        # Directives are equal when class and column data both match.
        if other.__class__ is self.__class__:
            return other.data == self.data
        return 0

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "<%s from %s with %s>" % (
            self.__class__.__name__, repr(self.source), repr(self.data))
+
+
+
class XMLConfigReader (xml.sax.handler.ContentHandler):
    """SAX content handler that feeds elements to directive handlers.

    Keeps a stack of variable mappings so each nested element works
    with a copy of its parent's variables, plus a registry of
    directive lists keyed by variation name.
    """

    def __init__(self, handlers):
        self.handlers = handlers
        # The default ('') variation collects directives declared
        # outside any <variation> element.
        default_directives = []
        self.variations = {'': default_directives}
        self.stack = [{'directives': default_directives,
                       'variations': self.variations}]
        self.locator = None

    def setDocumentLocator(self, locator):
        # Remembered so directives can be traced to file and line.
        self.locator = locator

    def startElement(self, name, attrs):
        # Push a copy of the parent's variables as this element's scope.
        scope = self.stack[-1].copy()
        self.stack.append(scope)
        handler = self.handlers[name]
        if self.locator is None:
            origin = ("unknown", 0)
        else:
            origin = (self.locator.getSystemId(),
                      self.locator.getLineNumber())
        handler(origin, scope, attrs)

    def endElement(self, name):
        # Discard this element's variable scope.
        self.stack.pop()
+
+
+
class DirectiveReader:
    """Collects directives from one or more XML configuration files.

    Directives are indexed by unique key and variation name; an
    identical re-declaration is tolerated while a conflicting one
    raises KeyError.
    """

    def __init__(self, handlers):
        # { unique key -> { variation name -> directive } }
        self.directives = {}
        self.handlers = handlers

    def read(self, filename):
        """Parses one XML file and merges in all of its variations."""
        reader = XMLConfigReader(self.handlers)
        parse(filename, reader)
        for vname, directives in reader.variations.items():
            self.add(directives, vname)

    def add(self, directives, vname):
        """Merges a list of directives declared for variation 'vname'.

        Raises KeyError if a directive conflicts with one already
        recorded under the same unique key and variation.
        """
        for d in directives:
            key = d.get_unique_key()
            info = self.directives.setdefault(key, {})
            if vname in info:
                if d != info[vname]:
                    raise KeyError(
                        'Conflicting directives: %s != %s' % (
                            repr(d), repr(info[vname])))
            else:
                info[vname] = d

    def get_directives(self, vname=''):
        """Returns the directives for a variation.

        The requested variation's directive wins; otherwise the
        default variation's directive (if any) is the fallback.
        """
        res = []
        # Note that although there isn't a way to declare that a
        # variation extends another variation, all variations should
        # derive from the default anyway, so we don't need the
        # extension mechanism yet.
        if not vname:
            vnames = ('',)
        else:
            vnames = (vname, '')
        for key, info in self.directives.items():
            for vn in vnames:
                if vn in info:
                    res.append(info[vn])
                    break  # Go to next directive
        return res
+
+
+
class DirectiveTables:
    """Indexes directives into per-class tables for querying."""

    def __init__(self, directives):
        self.tables = {}  # {table name -> table}
        for d in directives:
            d.index(self.tables)

    def query(self, table_name, **filter):
        """Returns the directive records matching the filter.

        Returns an empty list when no directive of the given type was
        declared at all.
        """
        t = self.tables.get(table_name)
        if t is None:
            return []
        return t.select(filter)

    def query_field(self, table_name, field, **filter):
        """Returns one field of the single matching record, or None.

        Raises LookupError if the filter matches more than one record.
        """
        t = self.tables.get(table_name)
        if t is None:
            return None
        records = t.select(filter)
        if len(records) > 1:
            raise LookupError(
                "More than one record returned from field query")
        if not records:
            return None
        return records[0][field]
+
+
+
class ComponentSystem:
    """Registry that assembles and caches configured components.

    Assembler factories are registered per component type; get()
    builds components on demand and memoizes them.
    """

    def __init__(self, directives):
        # Indexed directive tables shared by all assemblers.
        self.dtables = DirectiveTables(directives)
        self.factories = {}  # {comptype -> assembler factory}
        self.components = {}  # {(comptype, name) -> component}

    def add_component_type(self, comptype, assembler_factory):
        """Registers the assembler factory for a component type."""
        self.factories[comptype] = assembler_factory

    def get(self, comptype, name):
        """Returns the component of the given type and name.

        The created object is cached *before* configure() runs, so
        components that reference each other can be assembled without
        infinite recursion.
        """
        obj = self.components.get((comptype, name))
        if obj is not None:
            return obj
        f = self.factories[comptype]
        assembler = f(self, comptype, name)
        obj = assembler.create()
        self.components[(comptype, name)] = obj
        assembler.configure()
        return obj
+
+
Property changes on: Products.Ape/trunk/lib/apelib/config/common.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/config/interfaces.py
===================================================================
--- Products.Ape/trunk/lib/apelib/config/interfaces.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/config/interfaces.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,42 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Configuration interfaces.
+
+$Id$
+"""
+
+from Interface import Interface
+
class IDirective(Interface):
    """A single configuration directive, derived from one XML element."""

    def get_unique_key():
        """Returns a key that distinguishes this directive from all others.

        This is used for detecting conflicting directives.  The
        returned key must be hashable.  It normally includes the type
        (class or interface) of the directive.  If this returns None,
        the directive conflicts with nothing.
        """

    def index(tables):
        """Adds self to a table.

        'tables' is a mapping from table name to table.  The table
        name is usually the class of the directive.
        """
+
+# IAssembler, IComponentSystem, etc.
+
Property changes on: Products.Ape/trunk/lib/apelib/config/interfaces.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,17 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""General object mapping framework.
+
+The names are influenced by Martin Fowler's O/R mapping patterns.
+"""
Property changes on: Products.Ape/trunk/lib/apelib/core/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/classifiers.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/classifiers.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/classifiers.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,54 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard classifiers.
+
+$Id$
+"""
+
+from apelib.core.interfaces import IConfigurableClassifier, IClassifier
+from apelib.core.interfaces import ClassificationError, ConfigurationError
+
+
class SimpleClassifier:
    """Classifies objects based purely on the class of the object.
    """

    __implements__ = IConfigurableClassifier
    gateway = None

    def __init__(self, gw):
        self._class_to_mapper = {}  # class name -> mapper_name
        self.gateway = gw

    def add_store_rule(self, class_name, mapper_name, *args, **kw):
        """Maps a class name to a mapper; extra rule options are ignored."""
        self._class_to_mapper[class_name] = mapper_name

    def add_load_rule(self, criterion, value, mapper_name):
        # Load rules are irrelevant here: only the class matters.
        pass

    def set_option(self, mapper_name, option, value):
        raise ConfigurationError("No options available")

    def _lookup(self, class_name):
        # Raise the documented mapping exception instead of a bare
        # KeyError when no rule covers the class.  (ClassificationError
        # was imported for exactly this purpose but never used.)
        try:
            return self._class_to_mapper[class_name]
        except KeyError:
            raise ClassificationError(
                "No mapper known for class %s" % repr(class_name))

    def classify_object(self, event):
        """Returns the classification of an object being stored."""
        c = event.obj.__class__
        class_name = "%s.%s" % (c.__module__, c.__name__)
        mapper_name = self._lookup(class_name)
        return {"class_name": class_name, "mapper_name": mapper_name}

    def classify_state(self, event):
        """Returns the classification of stored state being loaded."""
        classification, serial = self.gateway.load(event)
        class_name = classification["class_name"]
        mapper_name = self._lookup(class_name)
        return {"class_name": class_name, "mapper_name": mapper_name}
+
Property changes on: Products.Ape/trunk/lib/apelib/core/classifiers.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/events.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/events.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/events.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,186 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard event implementations
+
+$Id$
+"""
+
+import interfaces
+
+
# Shared immutable values that never get internal references; they are
# compared by identity in SerializationEvent.serialized().
SIMPLE_IMMUTABLE_OBJECTS = (None, (), 0, 1, '', u'', False, True)
+
+
class DatabaseInitEvent:
    """Database initialization event.
    """
    __implements__ = interfaces.IDatabaseInitEvent
    connections = None  # database connections passed to initializers
    clear_all = False   # true when existing data should be cleared

    def __init__(self, connections, clear_all):
        self.connections = connections
        self.clear_all = clear_all
+
+
class MapperEvent:
    """Base event for operations involving a mapper and an OID."""
    __implements__ = interfaces.IMapperEvent
    conf = None            # the mapper configuration
    mapper = None          # the mapper handling the object
    oid = ""               # the object identifier
    classification = None  # classification mapping, if known

    def __init__(self, conf, mapper, oid, classification):
        self.conf = conf
        self.mapper = mapper
        self.oid = oid
        self.classification = classification
+
+
class GatewayEvent (MapperEvent):
    """Mapper event that also carries database connections."""
    __implements__ = interfaces.IGatewayEvent
    connections = None  # database connections available to the gateway

    def __init__(self, conf, mapper, oid, classification, connections):
        MapperEvent.__init__(self, conf, mapper, oid, classification)
        self.connections = connections
+
+
class LoadEvent (GatewayEvent):
    """Object loading event.
    """
    __implements__ = interfaces.ILoadEvent

    def classify(self, oid):
        """Returns the classification of another stored object.

        Issues a sub-event to the configured classifier, reusing this
        event's connections.
        """
        sub_event = LoadEvent(self.conf, None, oid, None, self.connections)
        return self.conf.classifier.classify_state(sub_event)
+
+
class StoreEvent (GatewayEvent):
    """Object storing event.
    """
    __implements__ = interfaces.IStoreEvent
    is_new = False  # true when the OID must not already exist

    def __init__(self, conf, mapper, oid, classification, connections, is_new):
        GatewayEvent.__init__(
            self, conf, mapper, oid, classification, connections)
        self.is_new = is_new
+
+
class SDEvent (MapperEvent):
    """Base event for serialization and deserialization."""
    __implements__ = interfaces.ISDEvent
    obj_db = None          # the object database (e.g. a ZODB connection)
    obj = None             # the object being (de)serialized
    serializer_name = ""   # name of the serializer currently at work
    upos = None            # presumably "unmanaged persistent objects" -- TODO confirm
    external = None        # external references: [(oid, subobject)]

    def __init__(self, conf, mapper, oid, classification, obj_db, obj):
        MapperEvent.__init__(self, conf, mapper, oid, classification)
        self.obj_db = obj_db
        self.obj = obj
        self.upos = []
        # self.external has the form [(oid, subobject)]
        self.external = []
+
+
class DeserializationEvent (SDEvent):
    """Event used while deserializing one object."""
    __implements__ = interfaces.IFullDeserializationEvent

    def __init__(self, conf, mapper, oid, classification, obj_db, obj):
        SDEvent.__init__(self, conf, mapper, oid, classification, obj_db, obj)
        # { 'serializer_name:name' string -> object }
        # (keys are the strings built by deserialized(), not tuples)
        self._loaded_refs = {}

    # IDeserializationEvent interface methods:

    def deserialized(self, name, value):
        """Records a value loaded by the current serializer."""
        self._loaded_refs['%s:%s' % (self.serializer_name, name)] = value

    def resolve(self, name, oid, classification=None):
        """Retrieves a referenced subobject (usually ghosted initially).
        """
        ob = self.obj_db.get(oid, classification)
        self.external.append((oid, ob))
        self.deserialized(name, ob)
        return ob

    # IFullDeserializationEvent interface methods:

    def resolve_internal(self, ref):
        """Returns an object already deserialized by another serializer.

        'ref' is a 'serializer_name:name' string, as recorded by
        deserialized().
        """
        return self._loaded_refs[ref]
+
+
class SerializationEvent (SDEvent):
    """Event used while serializing one object."""
    __implements__ = interfaces.IFullSerializationEvent

    def __init__(self, conf, mapper, oid, classification, obj_db, obj):
        SDEvent.__init__(self, conf, mapper, oid, classification, obj_db, obj)
        # Names of attributes consumed by serializers so far.
        self._attrs = {}
        # _internal_refs:
        # id(ob) -> 'serializer_name:name' string, or None when the
        # value was serialized without a name.
        self._internal_refs = {}
        # _internal_ref_list contains all objects that may be referenced
        # internally. This only ensures that id(ob) stays consistent.
        self._internal_ref_list = []

    # ISerializationEvent interface methods:

    def serialized(self, name, value, is_attribute):
        """See the ISerializationEvent interface."""
        for ob in SIMPLE_IMMUTABLE_OBJECTS:
            # If value is a simple immutable object, don't make a
            # reference to it.  Compare by identity rather than
            # equality, otherwise rich comparison leads to surprises.
            if value is ob:
                break
        else:
            # Make internal references only for mutable or complex objects.
            idx = id(value)
            if not self._internal_refs.has_key(idx):
                # Keeping the object alive ensures id(value) is not
                # recycled for a different object.
                self._internal_ref_list.append(value)
                if name is not None:
                    self._internal_refs[idx] = (
                        '%s:%s' % (self.serializer_name, name))
                else:
                    self._internal_refs[idx] = None
        if is_attribute and name is not None:
            self._attrs[name] = 1

    def referenced(self, name, value, is_attribute, oid):
        """Records a reference to an external object with the given OID."""
        assert oid is not None
        self.external.append((oid, value))
        self.serialized(name, value, is_attribute)

    def ignore(self, name_or_names):
        """Marks one attribute name (or a sequence of names) as handled."""
        if isinstance(name_or_names, (str, unicode)):
            self._attrs[name_or_names] = 1
        else:
            for name in name_or_names:
                self._attrs[name] = 1


    # IFullSerializationEvent interface methods:

    def get_seralized_attributes(self):
        """Returns the name of all attributes serialized.

        NOTE(review): the misspelled name appears to be part of the
        published interface -- renaming it would break callers.
        """
        return self._attrs.keys()

    def identify_internal(self, ob):
        """Returns the 'serializer_name:name' ref string, or None."""
        return self._internal_refs.get(id(ob))
Property changes on: Products.Ape/trunk/lib/apelib/core/events.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/gateways.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/gateways.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/gateways.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,126 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard gateway classes.
+
+$Id$
+"""
+
+import time
+
+from interfaces import IGateway, OIDConflictError
+
+
class CompositeGateway:
    """Gateway that delegates to multiple smaller gateways."""

    __implements__ = IGateway
    schema = None

    def __init__(self, base=None):
        self._gws = {}
        if base is not None:
            # Start from a copy of the base gateway's registrations.
            self._gws.update(base._gws)
        self._update_schema()

    def _update_schema(self):
        """Rebuilds the composite schema from the contained gateways."""
        self.schema = {}
        for name, gw in self._gws.items():
            s = gw.schema
            if s is not None:
                self.schema[name] = s

    def add(self, name, gw, force=0):
        """Registers a sub-gateway; refuses to replace unless force is set."""
        if not force and name in self._gws:
            raise KeyError("Gateway name %s in use" % name)
        self._gws[name] = gw
        self._update_schema()

    def remove(self, name):
        del self._gws[name]  # raise KeyError if not in use
        self._update_schema()

    def has(self, name):
        return name in self._gws

    def load(self, event):
        """Loads data.

        Returns a pair containing the data and an object
        that acts as a serial number or a hash of the data.
        The serial number is either a time stamp or some other object
        that can be consistently compared to detect conflicts.
        """
        full_state = {}
        serials = {}
        for name, gw in self._gws.items():
            state, serial = gw.load(event)
            if state is not None:
                full_state[name] = state
            if serial is not None:
                serials[name] = serial
        # Sort so the composite serial compares consistently.
        return full_state, tuple(sorted(serials.items()))

    def store(self, event, full_state):
        """Stores data.

        Returns a new serial.
        """
        serials = {}
        for name, gw in self._gws.items():
            state = full_state.get(name)
            serial = gw.store(event, state)
            if serial is not None:
                serials[name] = serial
        return tuple(sorted(serials.items()))

    def get_sources(self, event):
        """Returns data source information. See IGateway.
        """
        res = {}
        for gw in self._gws.values():
            sources = gw.get_sources(event)
            if sources is not None:
                res.update(sources)
        return res
+
+
class RAMGateway:
    """Gateway to a simple dictionary (primarily for testing).
    """
    __implements__ = IGateway
    schema = None

    def __init__(self, schema):
        self.schema = schema
        self.data = {}  # {oid -> (data, serial)}

    def load(self, event):
        """Returns (data, serial); raises KeyError for an unknown OID."""
        return self.data[event.oid]

    def store(self, event, data):
        """Stores data under event.oid and returns the new serial.

        Refuses to overwrite an existing OID when the event marks the
        object as new.
        """
        if event.is_new and event.oid in self.data:
            raise OIDConflictError(event.oid)
        # A timestamp serves as the serial -- adequate for testing.
        h = time.time()
        self.data[event.oid] = (data, h)
        return h

    def get_sources(self, event):
        return None
+
Property changes on: Products.Ape/trunk/lib/apelib/core/gateways.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/interfaces.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/interfaces.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/interfaces.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,559 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Public interfaces and exception classes.
+
+$Id$
+"""
+
+from Interface import Interface, Attribute
+
+
+class MappingError(Exception):
+ """Object mapping exception"""
+
+class SerializationError(MappingError):
+ """Error during serialization"""
+
+class DeserializationError(MappingError):
+ """Error during deserialization"""
+
+class StoreError(MappingError):
+ """Error while storing"""
+
+class LoadError(MappingError):
+ """Error while loading"""
+
+class ClassificationError(MappingError):
+ """Error during classification"""
+
+class ConfigurationError(Exception):
+ """Invalid mapper configuration"""
+
+class OIDConflictError(Exception):
+ """Attempt to write an object with an OID already in use"""
+
+
+class IClassFactory(Interface):
+ """Class finder."""
+
+ def get_class(module_name, class_name):
+ """Returns the named class.
+
+ A default implementation may use Python's standard import
+ mechanism.
+ """
+
+
+class IObjectDatabase (IClassFactory):
+ """A collection of objects identifiable by OID.
+
+ In apelib.zodb3, the ZODB Connection object (the _p_jar) is
+ an IObjectDatabase.
+ """
+
+ def get(oid, classification=None):
+ """Returns a class instance, possibly ghosted.
+
+ Used during deserialization (loading/import).
+ The classification argument, a mapping, may be provided as an
+ optimization. Without it, implementations of this method may
+ have to load a full object rather than a ghosted object.
+ """
+
+ def identify(obj):
+ """Returns the OID of an object.
+
+ Used during serialization (storing/export).
+ Returns None if the object is not in the object database.
+ Raises TypeError if the object can not be stored directly
+ in the database.
+ """
+
+ def new_oid():
+ """Returns a new OID.
+
+ Used during serialization (storing/export).
+ """
+
+
+
+class IDatabaseInitializer (Interface):
+ """Provides a way to initialize a database."""
+
+ def init(event):
+ """Initializes the database, creating tables etc.
+
+ event is an IDatabaseInitEvent.
+ """
+
+
+class IDatabaseInitEvent (Interface):
+ """Interface for events involved in initializing databases."""
+
+ connections = Attribute("connections", "A mapping of database connections")
+
+ clear_all = Attribute("clear_all", """True to clear the database.
+
+ This attribute is designed for testing purposes.
+ """)
+
+
+class IMapperEvent (Interface):
+ """The base interface for events occurring in context of a mapper."""
+
+ conf = Attribute("conf", "The IMapperConfiguration")
+
+ mapper = Attribute("mapper", "The IMapper")
+
+ oid = Attribute("oid", "The OID of the object being mapped")
+
+ classification = Attribute(
+ "classification", "The classification of the object.")
+
+
+class IGatewayEvent (IMapperEvent):
+ """Interface for events used by gateways."""
+
+ connections = Attribute(
+ "connections", "A mapping of database connections")
+
+
+class ILoadEvent (IGatewayEvent):
+ """Interface for events involved in loading objects."""
+
+ def classify(oid):
+ """Returns the classification of the referenced object.
+ """
+
+
+class IStoreEvent (IGatewayEvent):
+ """Interface for events involved in storing objects."""
+
+ is_new = Attribute("is_new", """True if the object is new.
+
+ When this attribute is true, gateways should not overwrite
+ existing data but instead raise an OIDConflictError if something
+ is in the way. When it is false, gateways should overwrite
+ existing data.
+ """)
+
+
+class ISDEvent (IMapperEvent):
+ """Base for serialization and deserialization events."""
+
+ obj_db = Attribute("obj_db", "The relevant object database")
+
+ obj = Attribute("obj", "The object being (de)serialized.")
+
+ serializer_name = Attribute("serializer_name", "The serializer in use.")
+
+ upos = Attribute("upos", """The list of unmanaged persistent objects.
+
+ If no attention is paid to unmanaged persistent objects (UPOs),
+ they will not notify ZODB when they are changed, and hence can be
+ a challenge for the application programmer. Add UPOs to this list
+ so that ZODB will see changes made to them and save the
+ corresponding managed persistent object.""")
+
+ external = Attribute("external", """The list of external oids.
+
+ The list is built up during (de)serialization. It contains
+ [(oid, subobject)].""")
+
+
+class IDeserializationEvent(ISDEvent):
+ """A helper in the object deserialization process.
+
+ Implementations of ISerializer.deserialize() call
+ methods of this interface to restore internal and external
+ references.
+ """
+
+ def deserialized(name, value):
+ """Indicates that a named subobject was deserialized.
+
+ The event records an intra-record reference. Be careful to
+ unwrap non-persistent wrappers around the value before calling
+ this method.
+ """
+
+ def resolve(name, oid, classification=None):
+ """Returns the object identified by an inter-record reference.
+
+ The object should have been stored earlier through a call to
+ ISerializationEvent.reference(). The return value is usually
+ ghosted initially.
+
+ The event also records an intra-record reference.
+ """
+
+
+class IFullDeserializationEvent(IDeserializationEvent):
+ """Deserialization event with features for deserializing remainder data.
+ """
+
+ def resolve_internal(ref):
+ """Returns the object identified by an intra-record reference.
+
+ 'ref' is a tuple containing (serializer_name, name).
+ """
+
+
+class ISerializationEvent(ISDEvent):
+ """A helper in the object serialization process.
+
+ Implementations of ISerializer.serialize() call
+ methods of this interface to create internal and external
+ references.
+ """
+
+ def serialized(name, value, is_attribute):
+ """Indicates that a named subobject was serialized.
+
+ The event records an intra-record reference. Be careful to
+ unwrap non-persistent wrappers around the value before calling
+ this method.
+ """
+
+ def referenced(name, value, is_attribute, oid):
+ """Notifies the system of an inter-record reference.
+
+ Be careful to unwrap non-persistent wrappers around the value
+ before calling this method. Once the referenced object gets
+ stored, the deserialize() method of the serializer will be
+ able to find the referenced object by calling
+ IDeserializationEvent.resolve().
+
+ The event also records an intra-record reference.
+ """
+
+ def ignore(name_or_names):
+ """Indicates attribute name(s) to be ignored when serializing.
+ """
+
+
+class IFullSerializationEvent(ISerializationEvent):
+ """Serialization event with features for ensuring complete serialization.
+
+ Used for generating a 'remainder pickle'.
+ """
+
+ def get_seralized_attributes():
+ """Returns the names of all attributes serialized.
+ """
+
+ def identify_internal(ob):
+ """Returns the intra-record reference for a subobject, if there is one.
+
+ Returns (serializer_name, name) or None.
+ """
+
+
+class ISerializer(Interface):
+ """Object serializer / deserializer"""
+
+ schema = Attribute("schema", "The schema used by this component.")
+
+ def can_serialize(obj):
+ """Returns true if this serializer can serialize the given object.
+ """
+
+ def serialize(event):
+ """Returns the state of this part of the object.
+
+ Use the ISerializationEvent to set up internal and external
+ references.
+ """
+
+ def deserialize(event, state):
+ """Fills in the state of this part of the object.
+
+ Use the IDeserializationEvent to resolve external references.
+ No return value.
+ """
+
+
+class IFullObjectSerializer(ISerializer):
+ """Serializes/deserializes the complete state of objects.
+
+ The serialized state does not need to include the class of the object,
+ which is maintained separately.
+
+ IFullObjectSerializers usually delegate to multiple ISerializers
+ to do the actual work of (de)serialization. The schema of
+ IFullObjectSerializers is usually a dictionary containing the name
+ and schema of its constituent ISerializers.
+ """
+
+ def new_instance(event):
+ """Returns a new instance.
+
+ event is as IDeserializationEvent.
+
+ If this serializer works with instances of only one class,
+ new_instance() should not require the use of a
+ classification. Implementations that need the
+ classification argument can return None when classification is
+ None, but it may take more work to fetch the classification.
+
+ Implementations should use the IClassFactory implementation
+ in the obj_db attribute of the event to load classes.
+ """
+
+
+class IGateway (Interface):
+ """Loads and stores data by OID.
+
+ Implementations can store in entire tables, pieces of tables, translate
+ for storage in joined tables, or store in some entirely different way.
+
+ Based on _Patterns of Enterprise Application Architecture_
+ by Martin Fowler.
+ """
+
+ schema = Attribute("schema", "The schema used by this component.")
+
+ def load(event):
+ """Loads data.
+
+ event is an ILoadEvent.
+
+ Returns a pair containing the data and a hash of the data.
+ The hash value is either an integer or an object that is
+ hashable using the Python hash() function. The hashable
+ object is used to detect storage conflicts.
+
+ If no data is available for the requested OID, load() should
+ raise a KeyError.
+ """
+
+ def store(event, data):
+ """Stores data.
+
+ event is an IStoreEvent.
+
+ Returns a new hash value.
+ """
+
+ def get_sources(event):
+ """Returns source information for an OID. event is an IGatewayEvent.
+
+ The source information allows the system to poll for changes
+ to keep caches in sync with the data. Where polling is not
+ necessary, gateways are free to return None.
+
+ The source information is a dictionary in the format:
+ {(source_repository, path): state}. The repository must be an
+ ISourceRepository. The source and state must be in a form
+ recognized by the repository. Since they are used as
+ dictionary keys, both the repositories and paths must be
+ hashable.
+ """
+
+
+class IClassifier(Interface):
+ """Object classifier
+
+ Implementations of this interface are a little like biologists.
+ During serialization, the classify_object() method returns a
+ mapping containing the classification of subob (like a biologist
+ identifying a creature's genus and species). During
+ deserialization, the classify_state() method decides what kind of
+ objects to create for a stored state (like a biologist showing you
+ a creature of a certain genus and species).
+
+ The keys in classifications are implementation-dependent.
+ """
+
+ gateway = Attribute("gateway", """The classification IGateway.
+
+ Classifiers load and store classifications using a gateway. This
+ attribute allows the system to store the classification of an
+ object by calling gateway.store().
+ """)
+
+ def classify_object(event):
+ """Returns a classification with at least a mapper_name.
+
+ event is an ILoadEvent without a mapper or classification
+ (since this method chooses them).
+ """
+
+ def classify_state(event):
+ """Returns a classification with at least a mapper_name.
+
+ event is an ILoadEvent without a mapper or classification
+ (since this method chooses them).
+
+ May load the classification from storage by calling
+ self.gateway.load().
+ """
+
+
+class IConfigurableClassifier (IClassifier):
+ """Classifier that accepts registrations.
+ """
+
+ def add_store_rule(class_name, mapper_name, exact=False,
+ default_extension=None, default_extension_source=None):
+ """Adds a rule that says which mapper to use for storing an instance.
+
+ If 'exact' is true, the mapper will not be used for
+ subclasses. 'default_extension' provides the default filename
+ extension to use when storing to the filesystem.
+ 'default_extension_source' selects a method of determining the
+ extension. One method is 'content_type', which reads the
+ content_type attribute of the object being stored and
+ translates the mime type to an extension. Don't provide both
+ 'default_extension' and 'default_extension_source'.
+ """
+
+ def add_load_rule(criterion, value, mapper_name):
+ """Adds a rule that says which mapper to use for loading some data.
+
+ The following values for 'criterion' are common:
+
+ 'mapper_name' - matches a previously stored mapper name
+ (useful for mapper name changes)
+ 'extension' - matches a filename extension
+ 'generic' - matches certain kinds of data. The
+ generic values depend on the classifier, but
+ include 'file', 'directory', 'basepath', and 'root'.
+ """
+
+
+class IOIDGenerator (Interface):
+ """A utility for generating OIDs.
+ """
+
+ root_oid = Attribute("root_oid", "The OID to use for the root")
+
+ def new_oid(event):
+ """Returns a new oid, which should be a string.
+
+ event is an IGatewayEvent.
+ """
+
+
+class IMapper (Interface):
+ """A hub for mapping a certain kind of object.
+ """
+ name = Attribute("name", "The name of this mapper")
+
+ class_name = Attribute(
+ "class_name", "The class expected by this mapper (may be empty)")
+
+ serializer = Attribute(
+ "serializer", "The IObjectSerializer for this mapper")
+
+ gateway = Attribute("gateway", "The IGateway for this mapper")
+
+ initializers = Attribute("initializers", "A list of IDatabaseInitializers")
+
+
+class IConfigurableMapper (IMapper):
+ """Adds operations to IMapper for configuration.
+ """
+
+ def check(my_name):
+ """Verifies the mapper configuration is sane.
+
+ Raises a ConfigurationError if inconsistencies are detected.
+
+ 'my_name' gives the name of the mapper for debugging purposes.
+ """
+
+
+class IMapperConfiguration (Interface):
+ """A configuration of mappers.
+ """
+
+ mappers = Attribute("mappers", "Maps mapper name to IMapper")
+
+ classifier = Attribute("classifier", "The IClassifier")
+
+ oid_gen = Attribute("oid_gen", "The IOIDGenerator")
+
+ initializers = Attribute("initializers", "A list of IDatabaseInitializers")
+
+ def check():
+ """Verifies the configuration is sane.
+
+ Raises a ConfigurationError if inconsistencies are detected.
+ """
+
+
+class ITPCConnection(Interface):
+ """Connection involved in minimal two-phase commit.
+
+ Based on ZODB.Transaction.
+ """
+
+ def connect():
+ """Opens any resources needed for transactions. Called only once."""
+
+ def sortKey():
+ """Returns a sort key for consistent ordering."""
+
+ def getName():
+ """Returns a human-readable name."""
+
+ def begin():
+ """Called before the first phase of two-phase commit."""
+
+ def vote():
+ """Called upon transition to the second phase of two-phase commit."""
+
+ def abort():
+ """Aborts the transaction."""
+
+ def finishWrite():
+ """Writes data in the second phase."""
+
+ def finishCommit():
+ """Commits in the second phase."""
+
+ def close():
+ """Closes resources. Called only once."""
+
+
+class ISourceRepository(Interface):
+ """Represents a collection of object sources.
+
+ Designed to help keep a cache in sync with its sources.
+ """
+
+ def poll(sources):
+ """Returns changed source information.
+
+ The source information is a mapping that maps
+ (source_repository, path) to a state object. The
+ source_repository will always be this object (the redundancy
+ keeps things simpler for scanners). This method returns a
+ mapping containing only the items of the input dictionary
+ whose state has changed.
+ """
+
+
+class IColumnSchema (Interface):
+ """A column in a table."""
+
+ name = Attribute(
+ "name", "The column name")
+
+ type = Attribute(
+ "type", "The type of data held in the column, as a string")
+
+ unique = Attribute(
+ "unique", "True if the column is part of the primary key")
Property changes on: Products.Ape/trunk/lib/apelib/core/interfaces.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/io.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/io.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/io.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,291 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Ape I/O facades.
+
+These facades implement commonly useful high-level mapper operations.
+
+$Id$
+"""
+
+from weakref import proxy
+
+from events import DatabaseInitEvent, GatewayEvent, LoadEvent, StoreEvent
+from events import SerializationEvent, DeserializationEvent
+from interfaces import IMapperConfiguration, ITPCConnection, IObjectDatabase
+from interfaces import LoadError
+
+
+class GatewayIO:
+ """Gateway operations facade."""
+
+ def __init__(self, conf, connections):
+ assert IMapperConfiguration.isImplementedBy(conf), conf
+ self.conf = conf
+ self.conn_map = connections
+ # Sort the connections by sort key. Use an extra index to avoid
+ # using connections as sort keys.
+ items = [] # [(sort_key, index, conn)]
+ index = 0
+ for c in connections.values():
+ assert ITPCConnection.isImplementedBy(c)
+ sort_key = c.sortKey()
+ items.append((sort_key, index, c))
+ index += 1
+ items.sort()
+ conn_list = []
+ for sort_key, index, c in items:
+ conn_list.append(c)
+ self.conn_list = conn_list
+
+ def open_connections(self):
+ try:
+ opened = []
+ for c in self.conn_list:
+ c.connect()
+ opened.append(c)
+ except:
+ for c in opened:
+ c.close()
+ raise
+
+ def close_connections(self):
+ for conn in self.conn_list:
+ conn.close()
+
+ def get_connection_list(self):
+ return self.conn_list
+
+ def get_connection_map(self):
+ return self.conn_map
+
+ def init_databases(self, clear_all=0):
+ """Creates tables, etc.
+ """
+ # Find all initializers, eliminating duplicates.
+ initializers = {} # obj -> 1
+ for mapper in self.conf.mappers.values():
+ for obj in mapper.initializers:
+ initializers[obj] = 1
+ for obj in self.conf.initializers:
+ initializers[obj] = 1
+
+ # Now call them.
+ event = DatabaseInitEvent(self.conn_map, clear_all)
+ for initializer in initializers.keys():
+ initializer.init(event)
+
+ def classify_state(self, oid):
+ event = LoadEvent(self.conf, None, oid, None, self.conn_map)
+ # Returns classification
+ return self.conf.classifier.classify_state(event)
+
+ def load(self, oid):
+ classification = self.classify_state(oid)
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = LoadEvent(
+ self.conf, mapper, oid, classification, self.conn_map)
+ state, hash_value = mapper.gateway.load(event)
+ return event, classification, state, hash_value
+
+ def store(self, oid, classification, state, is_new):
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = StoreEvent(
+ self.conf, mapper, oid, classification, self.conn_map, is_new)
+ # Store the classification first
+ self.conf.classifier.gateway.store(event, classification)
+ # Store the state second
+ new_hash = mapper.gateway.store(event, state)
+ return event, new_hash
+
+ def get_sources(self, oid):
+ try:
+ classification = self.classify_state(oid)
+ except LoadError:
+ # Doesn't exist.
+ return {}
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = LoadEvent(
+ self.conf, mapper, oid, classification, self.conn_map)
+ return mapper.gateway.get_sources(event)
+
+ def new_oid(self):
+ event = GatewayEvent(self.conf, None, None, None, self.conn_map)
+ return self.conf.oid_gen.new_oid(event)
+
+
+
+class ObjectSystemIO:
+ """Object system (de)serialization facade."""
+
+ def __init__(self, conf, obj_db):
+ assert IMapperConfiguration.isImplementedBy(conf), conf
+ assert IObjectDatabase.isImplementedBy(obj_db), obj_db
+ self.conf = conf
+ self.obj_db = obj_db
+
+ def classify_object(self, obj, oid):
+ event = SerializationEvent(
+ self.conf, None, oid, None, self.obj_db, obj)
+ # Returns classification
+ return self.conf.classifier.classify_object(event)
+
+ def serialize(self, oid, obj):
+ classification = self.classify_object(obj, oid)
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = SerializationEvent(
+ self.conf, mapper, oid, classification, self.obj_db, obj)
+ state = mapper.serializer.serialize(event)
+ return event, classification, state
+
+ def deserialize(self, oid, obj, classification, state):
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = DeserializationEvent(
+ self.conf, mapper, oid, classification, self.obj_db, obj)
+ mapper.serializer.deserialize(event, state)
+ return event
+
+ def new_instance(self, oid, classification):
+ mapper_name = classification['mapper_name']
+ mapper = self.conf.mappers[mapper_name]
+ event = DeserializationEvent(
+ self.conf, mapper, oid, classification, self.obj_db, None)
+ return mapper.serializer.new_instance(event)
+
+
+
+class ExportImport:
+ """Simple import/export facade.
+ """
+ __implements__ = IObjectDatabase
+
+ def __init__(self, conf, connections, class_factory=None):
+ self._objects = {} # { oid -> obj }
+ self._oids = {} # { id(obj) -> oid }
+ # _incomplete contains the oids of objects not yet
+ # imported fully.
+ self._incomplete = {} # { oid -> 1 }
+ self._class_factory = class_factory
+ # Avoid a circular reference by making a weakref proxy
+ self.obj_io = ObjectSystemIO(conf, proxy(self))
+ self.gw_io = GatewayIO(conf, connections)
+
+
+ def _register(self, oid, obj):
+ """Registers obj in the temporary object index.
+
+ Returns true if the object was added to the index for the first
+ time. If the registration conflicts, raises an exception.
+ """
+ is_new = 0
+ if self._objects.has_key(oid):
+ if self._objects[oid] is not obj:
+ raise ValueError, (
+ "Multiple objects for oid %s" % repr(oid))
+ else:
+ self._objects[oid] = obj
+ is_new = 1
+ obj_id = id(obj)
+ if self._oids.has_key(obj_id):
+ if self._oids[obj_id] != oid:
+ raise ValueError, (
+ "Multiple oids for object %s" % repr(obj))
+ else:
+ self._oids[obj_id] = oid
+ is_new = 1
+ return is_new
+
+
+ def export_object(self, src_obj, dest_oid=None, deactivate_func=None):
+ count = 0
+ if dest_oid is None:
+ dest_oid = self.new_oid()
+ self._register(dest_oid, src_obj)
+ # Export subobjects.
+ todo = [(dest_oid, src_obj)]
+ while todo:
+ oid, obj = todo.pop()
+ event, classification, state = self.obj_io.serialize(oid, obj)
+ count += 1
+ if deactivate_func is not None:
+ deactivate_func(obj, count)
+ self.gw_io.store(oid, classification, state, False)
+ ext_refs = event.external
+ if ext_refs:
+ for ext_oid, ext_obj in ext_refs:
+ if self._register(ext_oid, ext_obj):
+ todo.append((ext_oid, ext_obj))
+
+
+ def import_object(self, src_oid, dest_obj=None, commit_func=None):
+ count = 0
+ if dest_obj is None:
+ dest_obj = self.get(src_oid)
+ root_obj = dest_obj
+ self._register(src_oid, dest_obj)
+ # Import subobjects.
+ todo = [(src_oid, dest_obj)]
+ while todo:
+ oid, obj = todo.pop()
+ e, classification, state, hash_value = self.gw_io.load(oid)
+ event = self.obj_io.deserialize(oid, obj, classification, state)
+ if self._incomplete.has_key(oid):
+ del self._incomplete[oid]
+ count += 1
+ if commit_func is not None:
+ commit_func(obj, count)
+ ext_refs = event.external
+ if ext_refs:
+ for ext_oid, ext_obj in ext_refs:
+ if (self._register(ext_oid, ext_obj)
+ or self._incomplete.has_key(ext_oid)):
+ todo.append((ext_oid, ext_obj))
+ return root_obj
+
+
+ # IObjectDatabase implementation
+
+ def get_class(self, module, name):
+ # Normally called only while importing
+ if self._class_factory is not None:
+ return self._class_factory.get_class(module, name)
+ else:
+ m = __import__(module, {}, {}, ('__doc__',))
+ return getattr(m, name)
+
+ def get(self, oid, classification=None):
+ # Should be called only while importing
+ try:
+ return self._objects[oid]
+ except KeyError:
+ # This object has not been loaded yet. Make a stub.
+ e, classification, state, hash_value = self.gw_io.load(oid)
+ obj = self.obj_io.new_instance(oid, classification)
+ # Don't fill in the state yet, to avoid infinite
+ # recursion. Just register it.
+ self._incomplete[oid] = 1
+ self._register(oid, obj)
+ return obj
+
+ def identify(self, obj):
+ # Normally called only while exporting
+ return self._oids.get(id(obj))
+
+ def new_oid(self):
+ # Should be called only while exporting
+ return self.gw_io.new_oid()
Property changes on: Products.Ape/trunk/lib/apelib/core/io.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/mapper.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/mapper.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/mapper.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,104 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard mapper class.
+
+$Id$
+"""
+
+from types import DictType
+
+import interfaces
+from interfaces import ConfigurationError
+
+
+class Mapper:
+ """Standard mapper class.
+ """
+ __implements__ = interfaces.IConfigurableMapper
+ name = None
+ class_name = None
+ serializer = None
+ gateway = None
+ initializers = None
+
+ def __init__(self, name=None, class_name=None,
+ serializer=None, gateway=None):
+ self.name = name
+ self.class_name = class_name
+ self.serializer = serializer
+ self.gateway = gateway
+ self.initializers = []
+
+ # IConfigurableMapper implementation
+
+ def check(self, my_name):
+ s = self.serializer
+ if s is None:
+ raise ConfigurationError(
+ 'Mapper %s: No serializer configured' % my_name)
+ if not interfaces.IFullObjectSerializer.isImplementedBy(s):
+ raise ConfigurationError(
+ 'Mapper %s: Serializer is not an IFullObjectSerializer'
+ % my_name)
+ g = self.gateway
+ if g is None:
+ raise ConfigurationError(
+ 'Mapper %s: No gateway configured' % my_name)
+ if not interfaces.IGateway.isImplementedBy(g):
+ raise ConfigurationError(
+ 'Mapper %s: Gateway is not an IGateway' % my_name)
+ if s.schema != g.schema:
+ # Try to show a descriptive error
+ ss = s.schema
+ gs = g.schema
+ text = None
+ if isinstance(ss, DictType) and isinstance(gs, DictType):
+ for key in ss.keys():
+ if not gs.has_key(key):
+ text = 'No gateway provided for serializer "%s"' % key
+ break
+ elif ss[key] != gs[key]:
+ text = 'Mismatch on name "%s". %s != %s' % (
+ key, ss[key], gs[key])
+ break
+ if text is None:
+ for key in gs.keys():
+ if not ss.has_key(key):
+ text = ('No serializer provided for gateway "%s"'
+ % key)
+ break
+ if text is None:
+ text = '%s != %s' % (ss, gs)
+ raise ConfigurationError(
+ 'Mapper %s: Mismatched schemas. %s' % (my_name, text))
+
+
+class MapperConfiguration:
+ """Collects the mapper configuration with a classifier and OID generator.
+ """
+ __implements__ = interfaces.IMapperConfiguration
+ mappers = None
+ classifier = None
+ oid_gen = None
+ initializers = None
+
+ def __init__(self, mappers, classifier, oid_gen):
+ self.mappers = mappers
+ self.classifier = classifier
+ self.oid_gen = oid_gen
+ self.initializers = []
+
+ def check(self):
+ for name, mapper in self.mappers.items():
+ mapper.check(name)
Property changes on: Products.Ape/trunk/lib/apelib/core/mapper.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/oidgen.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/oidgen.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/oidgen.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,39 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard oid generators.
+
+$Id$
+"""
+
+from apelib.core.interfaces import IOIDGenerator, IGatewayEvent
+
+
+class SerialOIDGenerator:
+ """Minimal OID generator that generates OIDs in series.
+
+ Does not store the counter in non-volatile storage.
+ """
+ __implements__ = IOIDGenerator
+
+ root_oid = '0'
+ counter = 1
+
+ def __init__(self, root_oid="0"):
+ self.root_oid = root_oid
+
+ def new_oid(self, event):
+ assert IGatewayEvent.isImplementedBy(event)
+ oid = str(self.counter)
+ self.counter += 1
+ return oid
Property changes on: Products.Ape/trunk/lib/apelib/core/oidgen.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/schemas.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/schemas.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/schemas.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,112 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Basic schema implementations.
+
+$Id$
+"""
+
+from types import StringType
+
+from interfaces import IColumnSchema
+
# The set of column type names known to the schema machinery.
ok_types = [
    'unicode', 'string', 'int', 'long', 'float', 'bool', 'boolean', 'text',
    'object', 'classification', 'string:list', 'blob',
    ]


def add_column_type(t):
    """Registers an additional allowable column type.

    Raises TypeError if the type name is not a string (previously this
    was an assert, which is silently stripped under "python -O").
    Registering an already-known name is a harmless no-op.
    """
    if not isinstance(t, str):
        raise TypeError("Column type name must be a string: %r" % (t,))
    if t not in ok_types:
        ok_types.append(t)
+
+
class ColumnSchema:
    """Schema of a single column: a name, a type, and a uniqueness flag."""

    __implements__ = IColumnSchema
    name = None
    type = None
    unique = None

    def __init__(self, name, type='string', unique=0):
        assert type in ok_types, type
        self.name = name
        self.type = type
        # Normalize the flag to exactly 0 or 1.
        self.unique = not not unique

    def get_columns(self):
        # A single column is its own one-element column list.
        return [self]

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Mirror the historical fall-through: no explicit result
            # for comparisons with foreign types.
            return None
        if (self.name == other.name
            and self.type == other.type
            and self.unique == other.unique):
            return 1  # Same
        return 0  # Different

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'ColumnSchema(%r, %r, %r)' % (self.name, self.type, self.unique)

# b/w compat.
FieldSchema = ColumnSchema
+
+
class RowSequenceSchema:
    """Schema for a sequence of rows, with optional row count limits."""

    def __init__(self, columns=(), min_rows=0, max_rows=0):
        # A max_rows of zero means "no upper limit".
        assert (max_rows == 0 or max_rows >= min_rows)
        self.min_rows = min_rows
        self.max_rows = max_rows
        self.columns = []
        self.column_names = {}
        for col in columns:
            self._add(col)

    def get_columns(self):
        """Returns the flattened list of all member columns."""
        cols = []
        for member in self.columns:
            cols.extend(member.get_columns())
        return cols

    def _add(self, c):
        # Column names must be unique within the sequence.
        if c.name in self.column_names:
            raise KeyError('Duplicate column name: %s' % c.name)
        self.column_names[c.name] = 1
        self.columns.append(c)

    def add(self, name, type='string', unique=0):
        """Convenience: builds and adds a ColumnSchema."""
        self._add(ColumnSchema(name, type, unique))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Historical fall-through: no explicit result for
            # comparisons with foreign types.
            return None
        if (self.columns == other.columns
            and self.min_rows == other.min_rows
            and self.max_rows == other.max_rows):
            return 1  # Same
        return 0  # Different

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'RowSequenceSchema(%r, min_rows=%r, max_rows=%r)' % (
            self.columns, self.min_rows, self.max_rows)
Property changes on: Products.Ape/trunk/lib/apelib/core/schemas.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/core/serializers.py
===================================================================
--- Products.Ape/trunk/lib/apelib/core/serializers.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/core/serializers.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,253 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Standard serializers.
+
+$Id$
+"""
+
+from types import StringType
+
+from interfaces import ISerializer, IFullObjectSerializer
+from interfaces import DeserializationError, SerializationError
+from schemas import ColumnSchema
+
+
class CompositeSerializer:
    """Full serializer composed of named partial serializers.

    Keeps an ordered list of (name, serializer) pairs.  Parts added
    with final=1 always run after the ordinary parts.
    """
    __implements__ = IFullObjectSerializer
    schema = None

    def __init__(self, base=None):
        self._part_names = {}   # { name -> 1 }
        self._parts = []        # [(name, serializer)] -- Order matters.
        self._final_parts = []  # [(name, serializer)]
        if base is not None:
            # Start from a copy of the base composite's configuration.
            self._part_names.update(base._part_names)
            self._parts[:] = base._parts
            self._final_parts[:] = base._final_parts
        self._update_schema()

    def _update_schema(self):
        """Recomputes self.schema as {part name -> part schema}."""
        self.schema = {}
        for name, serializer in self.get_serializers():
            s = serializer.schema
            if s is not None:
                self.schema[name] = s

    def add(self, name, serializer, force=0, final=0):
        """Adds a partial serializer under a unique name.

        If the name is already in use, raises KeyError unless 'force'
        is true, in which case the old serializer is replaced.
        """
        if name in self._part_names:
            if not force:
                raise KeyError("Serializer name %s in use" % repr(name))
            # Bug fix: this previously called self.removeSerializer(),
            # a method that does not exist, so force=1 always raised
            # AttributeError.
            self.remove(name)
        if final:
            self._final_parts.append((name, serializer))
        else:
            self._parts.append((name, serializer))
        self._part_names[name] = 1
        self._update_schema()

    def remove(self, name):
        """Removes the partial serializer registered under 'name'."""
        if name not in self._part_names:
            raise KeyError("Serializer name %s not in use" % repr(name))
        for lst in (self._parts, self._final_parts):
            for i in range(len(lst)):
                if lst[i][0] == name:
                    del lst[i]
                    break
        del self._part_names[name]
        self._update_schema()

    def has(self, name):
        """Returns a true value if a part named 'name' is registered."""
        return name in self._part_names

    def get_serializers(self):
        """Returns all parts in execution order: ordinary, then final."""
        return self._parts + self._final_parts

    def can_serialize(self, obj):
        # XXX Need access to the mapper to make this determination.
        return 1

    def has_base(self, klass, base_name):
        """Returns True if klass or any of its ancestors is 'base_name'.

        'base_name' is a dotted 'module.class' string.
        """
        try:
            n = '%s.%s' % (klass.__module__, klass.__name__)
        except AttributeError:
            return False
        if n == base_name:
            return True
        for b in klass.__bases__:
            if self.has_base(b, base_name):
                return True
        return False

    def serialize(self, event):
        """Runs every part, collecting {part name -> part state}.

        Parts that return None contribute nothing to the full state.
        """
        if event.mapper.class_name:
            assert self.has_base(
                event.obj.__class__, event.mapper.class_name), (
                event.obj, event.mapper.class_name)
        else:
            raise RuntimeError("Mapper '%s' is abstract" % event.mapper.name)
        full_state = {}
        for name, s in self.get_serializers():
            event.serializer_name = name
            state = s.serialize(event)
            if state is not None:
                full_state[name] = state
        return full_state

    def deserialize(self, event, full_state):
        """Feeds each part its slice of full_state (None when absent)."""
        if event.mapper.class_name:
            assert self.has_base(
                event.obj.__class__, event.mapper.class_name), (
                event.obj, event.mapper.class_name)
        for name, s in self.get_serializers():
            state = full_state.get(name)
            event.serializer_name = name
            s.deserialize(event, state)

    def new_instance(self, event):
        """Creates an empty instance of the class to deserialize into.

        Returns None when no classification is available.
        """
        if event.classification is None:
            # Can't do anything without the classification.
            return None
        cn = event.classification.get('class_name')
        if cn is None:
            # Fall back to the mapper's default class.
            # NOTE(review): if the mapper's class_name is also None,
            # cn.rfind below raises AttributeError -- presumably mappers
            # reaching this point always declare a class; verify.
            cn = event.mapper.class_name
        pos = cn.rfind('.')
        if pos < 0:
            raise ValueError("class_name must include the module")
        module = cn[:pos]
        name = cn[pos + 1:]
        c = event.obj_db.get_class(module, name)
        if hasattr(c, "__basicnew__"):  # ExtensionClass
            return c.__basicnew__()
        else:
            # NOTE(review): new-style classes expect c.__new__(c);
            # confirm target classes are classic or provide __basicnew__.
            return c.__new__()
+
+
class PDBSerializer (CompositeSerializer):
    """Debugging aid: drops into PDB before (de)serialization.

    Bug fix: both methods previously delegated to an undefined name
    'AnyObjectSerializer', raising NameError; the actual superclass is
    CompositeSerializer.
    """

    def serialize(self, event):
        import pdb
        pdb.set_trace()
        return CompositeSerializer.serialize(self, event)

    def deserialize(self, event, full_state):
        import pdb
        pdb.set_trace()
        CompositeSerializer.deserialize(self, event, full_state)
+
+
class FullState:
    """Serializer that captures an object's complete state in one column."""

    __implements__ = ISerializer

    schema = ColumnSchema('data', 'object')

    def can_serialize(self, obj):
        # Applicable to any object with the standard state protocol.
        return 1

    def serialize(self, event):
        # Delegate entirely to the object's own state protocol.
        return event.obj.__getstate__()

    def deserialize(self, event, state):
        event.obj.__setstate__(state)
+
+
+
class IgnoredAttribute:
    """Serializer that deliberately skips over one attribute."""

    __implements__ = ISerializer
    schema = None  # Nothing is ever stored for this attribute.

    def __init__(self, attrname):
        self.attrname = attrname

    def can_serialize(self, obj):
        return 1

    def serialize(self, event):
        # Tell the event machinery the attribute is intentionally
        # left unserialized, and store nothing.
        event.ignore(self.attrname)
        return None

    def deserialize(self, event, state):
        # Nothing was stored, so nothing may come back.
        assert state is None, state
+
+
class OptionalSerializer:
    """Wraps a serializer, applying it only to compatible objects."""

    __implements__ = ISerializer
    schema = None

    def __init__(self, real, default_state=None):
        self._real = real
        # State substituted when the wrapped serializer doesn't apply.
        self._default_state = default_state
        self.schema = real.schema

    def can_serialize(self, obj):
        return 1

    def serialize(self, event):
        real = self._real
        if real.can_serialize(event.obj):
            return real.serialize(event)
        return self._default_state

    def deserialize(self, event, state):
        real = self._real
        if real.can_serialize(event.obj):
            real.deserialize(event, state)
        elif state is not None and state != self._default_state:
            # Non-trivial state for an incompatible object is an error.
            raise DeserializationError(
                "Optional serializer unable to install state %s into %s" %
                (repr(state), repr(event.obj)))
+
+
class StringDataAttribute:
    """Serializes one attribute whose value must be a plain string."""

    __implements__ = ISerializer

    schema = ColumnSchema('data', 'string')

    def __init__(self, attrname):
        self.attrname = attrname

    def can_serialize(self, object):
        return 1

    def serialize(self, event):
        name = self.attrname
        assert name
        value = getattr(event.obj, name)
        assert isinstance(value, StringType)
        # Record the attribute as serialized.
        # NOTE(review): the meaning of the third argument (1) is not
        # visible here -- confirm against the event interface.
        event.serialized(name, value, 1)
        return value

    def deserialize(self, event, state):
        name = self.attrname
        assert name
        assert isinstance(state, StringType)
        setattr(event.obj, name, state)
        event.deserialized(name, state)
+
Property changes on: Products.Ape/trunk/lib/apelib/core/serializers.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,18 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem gateway package.
+
+$Id$
+"""
+
Property changes on: Products.Ape/trunk/lib/apelib/fs/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/annotated.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/annotated.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/annotated.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,259 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""AnnotatedFilesystem class.
+
+$Id$
+"""
+
+import re
+from time import time
+from types import StringType
+
+
# This expression matches "\n[sectionname]...\n", where len(sectionname) > 0.
# Anything after the closing bracket on the header line is discarded.
section_re = re.compile(r'^\[([^\[\]\n]+)\][^\r\n]*(?:\r\n|\r|\n)',
                        re.MULTILINE)

# Filename extensions used for the two kinds of annotation files.
properties_ext = 'properties'
remainder_ext = 'remainder'

# Match 'foo.properties', 'foo.remainder', 'properties', or 'remainder'.
# This is for filtering out annotation filenames.
annotation_re = re.compile('(|.+[.])(%s|%s)$' % (
    properties_ext, remainder_ext))

# Names of annotations handled by this module
remainder_ann = 'remainder'  # The value is a binary string.
object_names_ann = 'object_names'
+
+
class AnnotatedFilesystem:
    """Filesystem abstraction that adds annotations and automatic extensions.

    Annotations are stored in .properties files (with an optional
    binary .remainder file read alongside).  Both caches below are
    short-lived (see ShortLivedCache), so repeated reads within one
    operation are cheap while stale data ages out quickly.
    """

    def __init__(self, ops, annotation_prefix='.', hidden_filenames='_'):
        # 'ops' performs the actual file operations.
        self.ops = ops
        # Prefix prepended to annotation filenames (e.g. '.properties').
        self.annotation_prefix = annotation_prefix
        # Filenames matching this pattern are hidden from listings.
        self.hidden_re = re.compile(hidden_filenames)
        # _anns_cache: { path -> annotations }
        self._anns_cache = ShortLivedCache()
        # _dir_cache: { path -> directory info }
        self._dir_cache = ShortLivedCache()

    def clear_cache(self):
        """Clears the cache of annotations and automatic filename extensions.

        Useful after writing to the filesystem.
        """
        self._anns_cache.clear()
        self._dir_cache.clear()

    def invalidate(self, path):
        """Invalidates info about a path being written.
        """
        self._anns_cache.invalidate(path)
        self._dir_cache.invalidate(path)

    def get_annotation_paths(self, path):
        """Returns the property and remainder paths for a path.

        For a directory the annotation files live inside it; for an
        ordinary file they live beside it, prefixed and dotted.
        """
        ops = self.ops
        if ops.isdir(path):
            base_fn = ops.join(path, self.annotation_prefix)
        else:
            dirname, filename = ops.split(path)
            base_fn = ops.join(dirname, '%s%s.' % (
                self.annotation_prefix, filename))
        return (base_fn + properties_ext, base_fn + remainder_ext)

    def get_annotations(self, path):
        """Reads the annotations for a path.

        Returns {annotation name -> string value}; results are cached.
        """
        res = self._anns_cache.get(path)
        if res is not None:
            return res
        props_fn, rem_fn = self.get_annotation_paths(path)
        res = {}
        try:
            data = self.ops.readfile(rem_fn, 0)
        except IOError:
            # The remainder file does not exist.
            pass
        else:
            res[remainder_ann] = data
        # Note properties file can override the remainder.
        try:
            data = self.ops.readfile(props_fn, 1)
        except IOError:
            # The properties file apparently does not exist
            self._anns_cache.set(path, res)
            return res
        # Parse "[section]" headers; the text between two headers is
        # the value of the annotation named by the earlier header.
        pos = 0
        prev_section_name = None
        while 1:
            match = section_re.search(data, pos)
            if match is None:
                endpos = len(data)
            else:
                endpos = match.start()
            if prev_section_name is not None:
                # get the data and decode.
                # ('[[' is the escape for a literal '[' --
                # see _format_section.)
                section = data[pos:endpos].replace('[[', '[')
                res[prev_section_name] = section
            if match is None:
                break
            else:
                prev_section_name = match.group(1)
                pos = match.end()
        self._anns_cache.set(path, res)
        return res

    def check_annotation_name(self, ann_name):
        """Raises ValueError unless the name is safe as a section header."""
        if (not isinstance(ann_name, StringType)
            or not ann_name
            or '[' in ann_name
            or ']' in ann_name
            or '\n' in ann_name):
            raise ValueError(ann_name)

    def write_annotations(self, path, anns):
        """Writes the annotation mapping 'anns' for 'path'.

        NOTE(review): rem_data is always empty here, so any existing
        .remainder file is removed and every annotation -- including a
        'remainder' entry -- is written as a section of the
        .properties file; confirm this is intended.
        """
        props_fn, rem_fn = self.get_annotation_paths(path)
        props_data = ''
        rem_data = ''
        items = anns.items()
        items.sort()
        for name, value in items:
            # Write a section of the properties file.
            props_data += self._format_section(name, value)
        self._write_or_remove(props_fn, 1, props_data)
        self._write_or_remove(rem_fn, 0, rem_data)
        self._anns_cache.invalidate(path)
        # The file might be new, so invalidate the directory.
        self._dir_cache.invalidate(self.ops.dirname(path))

    def _format_section(self, name, text):
        """Renders one '[name]' section, escaping '[' as '[['.

        Ensures the section ends with a newline so the next header
        starts on its own line.
        """
        s = '[%s]\n%s\n' % (name, text.replace('[', '[['))
        if not text.endswith('\n'):
            s += '\n'
        return s

    def _write_or_remove(self, fn, as_text, data):
        """If data is provided, write it. Otherwise remove the file.
        """
        ops = self.ops
        if data:
            ops.writefile(fn, as_text, data)
        else:
            if ops.exists(fn):
                ops.remove(fn)

    def is_legal_filename(self, fn):
        """Returns 0 for empty, annotation, or hidden filenames; else 1."""
        ap = self.annotation_prefix
        if (not fn or
            (fn.startswith(ap) and annotation_re.match(fn, len(ap)))
            or self.hidden_re.match(fn) is not None):
            return 0
        return 1

    def compute_contents(self, path, allow_missing=0):
        """Returns the name translations for a directory. Caches the results.

        Returns ({filename: name}, {name: filename}).
        """
        res = self._dir_cache.get(path)
        if res is not None:
            return res

        try:
            fns = self.ops.listdir(path)
        except OSError:
            if allow_missing:
                return {}, {}
            raise

        obj_list = []  # [name]
        trans = {}  # { base name -> filename with extension or None }
        filenames = filter(self.is_legal_filename, fns)
        anns = self.get_annotations(path)
        text = anns.get(object_names_ann)
        if text:
            # Prepare a dictionary of translations from basename to filename.
            # A None value means "do not strip this base name's extension".
            for fn in filenames:
                if '.' in fn:
                    base, ext = fn.split('.', 1)
                    if trans.has_key(base):
                        # Name collision: two or more files have the same base
                        # name. Don't strip the extensions for any of them.
                        trans[base] = None
                    else:
                        trans[base] = fn
                else:
                    trans[fn] = None
            obj_list = [line.strip() for line in text.split('\n')]
            for obj_name in obj_list:
                if '.' in obj_name:
                    # An object name uses an extension. Don't translate
                    # any name that uses the same base name.
                    base, ext = obj_name.split('.', 1)
                    trans[base] = None

        # Default: each file maps to its own name.
        fn_to_name = {}
        for fn in filenames:
            fn_to_name[fn] = fn
        # Translate the file names to object names.
        for obj_name in obj_list:
            fn = trans.get(obj_name)
            if fn:
                fn_to_name[fn] = obj_name
        # Build the reverse mapping.
        name_to_fn = {}
        for fn, name in fn_to_name.items():
            name_to_fn[name] = fn
        res = (fn_to_name, name_to_fn)
        self._dir_cache.set(path, res)
        return res
+
+
class ShortLivedCache:
    """A dictionary cache whose entire contents expire after a short time."""

    def __init__(self, lifetime=1):
        # The default lifetime is 1 second.
        self.lifetime = lifetime
        self.data = {}
        self.expiration = time() + lifetime

    def get(self, key, default=None):
        """Returns the cached value, or 'default' if absent or expired."""
        if time() >= self.expiration:
            # Everything in the cache is stale; drop it all.
            self.data.clear()
            return default
        return self.data.get(key, default)

    def set(self, key, value):
        """Stores a value, first flushing the cache if it has expired."""
        now = time()
        if now >= self.expiration:
            self.data.clear()
            self.expiration = now + self.lifetime
        self.data[key] = value

    def invalidate(self, key):
        """Forgets one key; unknown keys are silently ignored."""
        try:
            del self.data[key]
        except KeyError:
            pass

    def clear(self):
        """Empties the cache without touching the expiration clock."""
        self.data.clear()
Property changes on: Products.Ape/trunk/lib/apelib/fs/annotated.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/base.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/base.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/base.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,31 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Base implementation for FS gateways.
+
+$Id$
+"""
+
class FSGatewayBase:
    """Common plumbing for filesystem gateways: connection lookup."""

    schema = None

    def __init__(self, conn_name='fs'):
        # Name under which the filesystem connection is registered.
        self.conn_name = conn_name

    def get_connection(self, event):
        """Returns the filesystem connection attached to the event."""
        return event.connections[self.conn_name]

    def get_sources(self, event):
        # Gateways that track source files override this.
        return None
Property changes on: Products.Ape/trunk/lib/apelib/fs/base.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/classification.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/classification.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/classification.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,71 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem classification annotation.
+
+$Id$
+"""
+
+from apelib.core.interfaces import IGateway, LoadError, OIDConflictError
+from apelib.core.schemas import ColumnSchema
+
+from base import FSGatewayBase
+
+
class FSClassificationAnnotation(FSGatewayBase):
    """Gateway for storing classification data."""

    __implements__ = IGateway

    schema = ColumnSchema('classification', 'classification')

    def load(self, event):
        """Reads the classification mapping for an oid.

        Returns (classification, serial), where the serial is the raw
        stripped annotation text.
        """
        fs_conn = self.get_connection(event)
        oid = event.oid
        classification = {'node_type': fs_conn.read_node_type(oid)}
        text = fs_conn.read_annotation(oid, 'classification', '')
        if text:
            # Each "key=value" line becomes a classification entry.
            for line in text.split('\n'):
                if '=' in line:
                    key, value = line.split('=', 1)
                    classification[key.strip()] = value.strip()
        classification['extension'] = fs_conn.read_extension(oid)
        classification['subpath'] = fs_conn.get_subpath(oid)
        return classification, text.strip()

    def store(self, event, state):
        # state is a classification
        fs_conn = self.get_connection(event)
        oid = event.oid
        if event.is_new:
            # Don't overwrite existing data
            try:
                fs_conn.read_node_type(oid)
            except LoadError:
                # Nothing exists yet.
                pass
            else:
                # Something exists. Don't overwrite it.
                raise OIDConflictError(oid)
        items = state.items()
        items.sort()
        lines = []
        for key, value in items:
            if key == 'extension':
                # The extension is routed to the connection rather
                # than stored in the annotation text.
                fs_conn.suggest_extension(oid, value)
            else:
                lines.append('%s=%s' % (key, value))
        text = '\n'.join(lines)
        fs_conn.write_annotation(oid, 'classification', text)
        return text.strip()
Property changes on: Products.Ape/trunk/lib/apelib/fs/classification.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/connection.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/connection.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/connection.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,703 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem connection class.
+
+$Id$
+"""
+
+import os
+
+from apelib.core.interfaces import ITPCConnection, ISourceRepository, LoadError
+
+from interfaces import IFSReader, IFSWriter, FSReadError, FSWriteError
+from fileops import StandardFileOperations
+from annotated import AnnotatedFilesystem, object_names_ann
+from oidtable import OIDTable
+
# Filesystem-layer debugging is enabled via an environment variable.
DEBUG = os.environ.get('APE_DEBUG_FS')


# For a node_type_ann, the value is 'f' (file) or 'd' (directory)
node_type_ann = '@node_type'

# data_ann holds the content of a file. It is not valid for directories.
data_ann = '@data'

# file_list_ann holds the content of a directory. It is not valid for files.
file_list_ann = '@files'

# The suggested filename extension.
suggested_extension_ann = '@s_ext'
+
+
+class FSConnection:
+ """Reads / writes files with annotations.
+ """
+ __implements__ = IFSReader, IFSWriter, ITPCConnection, ISourceRepository
+
+ basepath = ''
+ root_oid = '0'
+
+ # When app_filename is set, FSConnection translates paths, placing
+ # the application object at basepath and the root at
+ # (basepath)/_root.
+ app_filename = 'Application'
+
    def __init__(self, basepath, annotation_prefix='.', hidden_filenames='_',
                 ops=None):
        """Sets up reading and writing rooted at 'basepath'.

        'ops' is a file-operations object (defaults to
        StandardFileOperations); 'annotation_prefix' and
        'hidden_filenames' are passed through to AnnotatedFilesystem.
        """
        # These attributes are used for both reading and writing.
        self.basepath = basepath
        if ops is None:
            ops = StandardFileOperations()
        self.ops = ops
        self.afs = AnnotatedFilesystem(
            ops, annotation_prefix, hidden_filenames)
        self.table = OIDTable()

        # These attributes are used only for writing.
        self._final = 0  # True if second phase of commit.
        # _pending holds the data to be written.
        # _pending: { oid -> { annotation_name -> data } }
        self._pending = {}
        self._script = None  # [(instruction, *args)]
        self._tmp_subpaths = {}  # { oid: subpath }
+
+ def reset(self):
+ self._final = 0
+ self._pending.clear()
+ self.afs.clear_cache()
+ self._script = None
+ self._tmp_subpaths.clear()
+
+ #
+ # IFSReader implementation.
+ #
+
    def get_subpath(self, oid):
        """Returns the path of an oid relative to basepath, as a list.

        Falls back to the temporary-subpath table for oids not yet in
        the OID table; returns None when the oid is unknown.
        """
        p = self.table.get_path(self.root_oid, oid)
        if p is None:
            return self._tmp_subpaths.get(oid)
        if self.app_filename:
            # Translate paths.
            if p and p[0] == self.app_filename:
                # Place the application object at basepath.
                return p[1:]
            else:
                # Everything else goes in "_root".
                return ['_root'] + p
        else:
            return p
+
+ def get_path(self, oid):
+ p = self.get_subpath(oid)
+ if p is None:
+ raise LoadError(oid)
+ return self.ops.join(self.basepath, *p)
+
+ def read_node_type(self, oid):
+ path = self.get_path(oid)
+ if not self.ops.exists(path):
+ raise LoadError("%s does not exist" % path)
+ return self.ops.isdir(path) and 'd' or 'f'
+
+ def read_data(self, oid, allow_missing=0, as_text=0):
+ # Return a string.
+ try:
+ path = self.get_path(oid)
+ return self.ops.readfile(path, as_text)
+ except (LoadError, IOError):
+ if allow_missing:
+ return None
+ raise
+
    def read_directory(self, oid, allow_missing=0):
        # Return a sequence of (object_name, child_oid).
        # child_oid is None for entries not yet in the OID table.
        path = self.get_path(oid)
        contents = self.afs.compute_contents(path, allow_missing)
        fn_to_name, name_to_fn = contents
        children = self.table.get_children(oid)
        if children is None:
            children = {}
        # Remove vanished children from the OID table.
        for filename, child_oid in children.items():
            if not fn_to_name.has_key(filename):
                self.table.remove(oid, filename)
                # XXX Need to garbage collect descendants.
        # Return the current children.
        return [(objname, children.get(filename))
                for filename, objname in fn_to_name.items()]
+
+ def read_annotation(self, oid, name, default=None):
+ path = self.get_path(oid)
+ annotations = self.afs.get_annotations(path)
+ return annotations.get(name, default)
+
+ def read_object_name(self, oid):
+ parents = self.table.get_parents(oid)
+ parent_oid, filename = parents[0]
+ parent_path = self.get_path(parent_oid)
+ contents = self.afs.compute_contents(parent_path)
+ fn_to_name, name_to_fn = contents
+ return fn_to_name[filename]
+
+ def read_extension(self, oid):
+ path = self.get_path(oid)
+ stuff, ext = self.ops.splitext(path)
+ return ext
+
+ def assign_existing(self, oid, children):
+ """See IFSReader.
+ """
+ dir_path = self.get_path(oid)
+ contents = self.afs.compute_contents(dir_path)
+ fn_to_name, name_to_fn = contents
+ existing = self.table.get_children(oid) or {}
+ for name, child_oid in children:
+ assert child_oid
+ if existing.has_key(name) and existing[name] != child_oid:
+ raise FSReadError("assign_existing() doesn't override")
+ filename = name_to_fn[name]
+ self.table.add(oid, filename, child_oid)
+
+ def read_mod_time(self, oid, default=0):
+ """Returns the time an object was last modified.
+
+ Since objects are split into up to three files, this
+ implementation returns the modification time of the most
+ recently modified of the three.
+ """
+ path = self.get_path(oid)
+ extra = self.afs.get_annotation_paths(path)
+ maxtime = -1
+ for p in (path,) + tuple(extra):
+ try:
+ t = self.ops.getmtime(p)
+ except OSError:
+ pass
+ else:
+ if t > maxtime:
+ maxtime = t
+ if maxtime == -1:
+ maxtime = default
+ return maxtime
+
+ def _get_paths_mtime(self, paths):
+ t = []
+ for path in paths:
+ try:
+ t.append(self.ops.getmtime(path))
+ except OSError:
+ t.append(None)
+ return t
+
+ def get_sources(self, oid):
+ path = self.get_path(oid)
+ extra = self.afs.get_annotation_paths(path)
+ paths = (path,) + tuple(extra)
+ t = self._get_paths_mtime(paths)
+ return {(self, paths): t}
+
+ #
+ # ISourceRepository implementation.
+ #
+
+ def poll(self, sources):
+ """ISourceRepository implementation.
+
+ Returns the changed items.
+ """
+ res = {}
+ for source, t in sources.items():
+ myself, paths = source
+ assert myself is self
+ new_t = self._get_paths_mtime(paths)
+ if t != new_t:
+ res[source] = new_t
+ return res
+
+ #
+ # IFSWriter implementation.
+ #
+ def _queue(self, oid, name, data):
+ """Queues data to be written at commit time.
+
+ 'name' is the name of the annotation.
+ """
+ m = self._pending
+ anns = m.get(oid)
+ if anns is None:
+ anns = {}
+ m[oid] = anns
+ if anns.has_key(name):
+ if anns[name] != data:
+ raise FSWriteError(
+ 'Conflicting data storage at %s (%s)' %
+ (oid, name))
+ else:
+ anns[name] = data
+
+ def write_node_type(self, oid, data):
+ if data not in ('d', 'f'):
+ raise FSWriteError(
+ 'Node type must be "d" or "f" at %s' % oid)
+ self._queue(oid, node_type_ann, data)
+
+ def write_data(self, oid, data, as_text=0):
+ if not isinstance(data, type('')):
+ raise FSWriteError(
+ 'Data for a file must be a string at %s' % oid)
+ self._queue(oid, data_ann, (data, as_text))
+
+ def write_directory(self, oid, data):
+ if isinstance(data, type('')): # XXX Need a better check
+ raise FSWriteError(
+ 'Data for a directory must be a list or tuple at %s' % oid)
+ is_legal_filename = self.afs.is_legal_filename
+ for objname, child_oid in data:
+ assert child_oid, "%s lacks a child_oid" % repr(objname)
+ if not is_legal_filename(objname):
+ raise FSWriteError(
+ 'Not a legal object name: %s' % repr(objname))
+ self._queue(oid, file_list_ann, data)
+
+ def write_annotation(self, oid, name, data):
+ self.afs.check_annotation_name(name)
+ self._queue(oid, name, data)
+
+ def suggest_extension(self, oid, ext):
+ self._queue(oid, suggested_extension_ann, ext)
+
+
    def _prepare_container_changes(self, path, data):
        """Prepares the new dictionary of children for a directory.

        Chooses filenames for all of the directory's children.
        Prevents filename collisions involving extensions by enforcing
        the rule that if there is some object named 'foo.*', an object
        named 'foo' may not have an automatic extension.

        'path' is a filesystem path or None. 'data' is a list of
        (objname, child_oid). Returns {filename: child_oid}.
        """
        if path:
            existing = self.afs.compute_contents(path)[1]
            # existing contains {objname: filename}
        else:
            existing = {}

        # First pass: reserve base names used with explicit extensions.
        reserved = {}  # { object name stripped of extension: 1 }
        for objname, child_oid in data:
            if '.' in objname:
                base, ext = objname.split('.', 1)
                reserved[base] = 1
        # Second pass: pick a filename for every object.
        new_filenames = {}
        for objname, child_oid in data:
            filename = objname
            if '.' not in filename and not reserved.has_key(objname):
                # This object is eligible for an automatic extension.
                fn = existing.get(objname)
                if fn:
                    # Use the existing filename.
                    filename = fn
                else:
                    # New file: honor the extension suggested by the
                    # child's own pending writes, if any.
                    anns = self._pending.get(child_oid)
                    if anns:
                        extension = anns.get(suggested_extension_ann)
                        if extension:
                            if not extension.startswith('.'):
                                extension = '.' + extension
                            filename = objname + extension
            new_filenames[objname] = filename

        # Re-key the child list by chosen filename.
        fn_oid = {}
        for objname, child_oid in data:
            fn_oid[new_filenames[objname]] = child_oid
        return fn_oid
+
+
def _prepare(self):
    """Prepares for transaction commit.

    Does some early checking while it's easy to bail out.  This
    helps avoid exceptions during the second phase of transaction
    commit.  Builds the commit script and stores it in self._script.

    Raises FSWriteError when the pending data is inconsistent or a
    target path is not writable.
    """
    container_changes = {}  # {oid: {filename: child_oid}}
    for oid, anns in self._pending.items():
        if self.table.get_parents(oid) or oid == self.root_oid:
            # This is an existing object.  It has a path.
            p = self.get_subpath(oid)
            if p is None:
                raise FSWriteError(
                    "No path known for OID %s" % repr(oid))
            if p:
                info = self.ops.join(*p)
                path = self.ops.join(self.basepath, info)
            else:
                info = '/'
                path = self.basepath
            if not self.ops.exists(path):
                path = None
        else:
            # This is a new object.  It does not have a path yet.
            path = None
            info = 'new object: %s' % repr(oid)
        if path and not self.ops.canwrite(path):
            raise FSWriteError(
                "Can't get write access. %s" % info)

        # type must be provided and must always be either 'd' or 'f'.
        if node_type_ann not in anns:
            raise FSWriteError(
                'Node type not specified for %s' % info)
        t = anns[node_type_ann]

        if t == 'f':
            # Writing a file.  The unpack also verifies that file
            # data was actually queued (KeyError otherwise).
            data, as_text = anns[data_ann]
            if file_list_ann in anns:
                raise FSWriteError(
                    "Files can't have directory contents. %s" % info)
            if path and self.ops.isdir(path):
                raise FSWriteError(
                    "A directory exists where a file is to be written. %s"
                    % info)

        elif t == 'd':
            # Writing a directory.
            data = anns[file_list_ann]
            if data_ann in anns:
                raise FSWriteError(
                    "Directories can't have file data. %s" % info)
            if path and not self.ops.isdir(path):
                raise FSWriteError(
                    "A file exists where a directory is to be written. %s"
                    % info)
            fn_oid = self._prepare_container_changes(path, data)
            container_changes[oid] = fn_oid

        else:
            raise FSWriteError('Node type must be "d" or "f". %s' % info)
    self._script = self._generate_script(container_changes)
+
+
def _generate_script(self, container_changes):
    """Generate the script that commits the transaction.

    container_changes is {oid: {filename: child_oid}}.
    Returns [(instruction, *args)].
    """
    script = [("clear_temp",)]

    # Tally how many times each relevant child_oid is to be linked
    # or unlinked: {child_oid: [link_count, unlink_count]}
    counts = {}
    def tally(child_oid, index, counts=counts):
        entry = counts.get(child_oid)
        if entry is None:
            counts[child_oid] = entry = [0, 0]
        entry[index] += 1

    for oid, new_children in container_changes.items():
        old_children = self.table.get_children(oid) or {}
        for filename, child_oid in new_children.items():
            if old_children.get(filename) != child_oid:
                # Adding a link.
                tally(child_oid, 0)
                if DEBUG:
                    print('fs: add link %s/%s -> %s' % (
                        oid, filename, child_oid))
        for filename, child_oid in old_children.items():
            if new_children.get(filename) != child_oid:
                # Removing a link.
                tally(child_oid, 1)
                if DEBUG:
                    print('fs: del link %s/%s -> %s' % (
                        oid, filename, child_oid))

    # Add steps to the script that move objects to a temporary
    # directory, then steps that delete dropped objects.
    to_delete = []  # [oid]
    for child_oid, (links, unlinks) in counts.items():
        if not self.table.get_parents(child_oid):
            # A new object should be added once or not at all.
            if links > 1:
                raise FSWriteError(
                    "Multiple links to %s" % repr(child_oid))
        else:
            # An existing object should be moved, removed, or left alone.
            if links > 1 or (links > 0 and unlinks < 1):
                raise FSWriteError(
                    "Multiple links to %s" % repr(child_oid))
            if links > 0:
                # Moving.
                script.append(("move_to_temp", child_oid))
            elif unlinks > 0:
                # Deleting.
                to_delete.append(child_oid)

    for child_oid in to_delete:
        script.append(("delete", child_oid))
    script.append(("write_all", container_changes))
    if self.app_filename and self.root_oid in container_changes:
        # Link or unlink the application object.
        root_changes = container_changes[self.root_oid]
        script.append(("link_app", self.app_filename in root_changes))
    script.append(("clear_temp",))
    return script
+
def _rmrf(self, path):
    """Delete ala 'rm -rf'.

    Files are removed, directories are removed recursively, and a
    missing path is quietly ignored.
    """
    ops = self.ops
    if not ops.exists(path):
        return
    if ops.isdir(path):
        ops.rmtree(path)
    else:
        ops.remove(path)
+
def _do_clear_temp(self):
    """Script command: remove the temporary directory entirely.

    Also forgets all recorded temp subpaths.
    """
    self._rmrf(self.ops.join(self.basepath, '_tmp'))
    self._tmp_subpaths.clear()
+
def _move_base_contents(self, src, dest):
    """Move the base directory's contents, but not the directory.

    The special '_root' and '_tmp' subdirectories stay behind.
    """
    ops = self.ops
    if not ops.exists(dest):
        ops.makedirs(dest)
    for fn in ops.listdir(src):
        if fn in ('_root', '_tmp'):
            continue
        ops.rename(ops.join(src, fn), ops.join(dest, fn))
+
def _move_item(self, src, dest):
    """Move a file or directory from 'src' to 'dest'.

    For a plain file, the sidecar annotation files move with it.
    The destination's parent directory is created if needed.
    """
    ops = self.ops
    parent = ops.dirname(dest)
    if not ops.exists(parent):
        ops.makedirs(parent)
    if not ops.isdir(src):
        # A file: carry its annotation files along.
        for s, d in zip(self.afs.get_annotation_paths(src),
                        self.afs.get_annotation_paths(dest)):
            if ops.exists(s):
                ops.rename(s, d)
    ops.rename(src, dest)
+
def _do_move_to_temp(self, oid):
    """Script command: move an object into the temporary directory.

    Records the destination in self._tmp_subpaths and unlinks the
    object from all of its parents in the OID table.
    """
    ops = self.ops
    src = self.get_path(oid)
    if src == self.basepath:
        # The base directory itself: move most of its contents
        # instead of the actual directory.
        dest_sub = ('_tmp', 'base', 'data')
        mover = self._move_base_contents
    else:
        # An ordinary object.
        dest_sub = ('_tmp', 'oid.%s' % oid, 'data')
        mover = self._move_item
    dest = ops.join(self.basepath, *dest_sub)
    mover(src, dest)
    self._tmp_subpaths[oid] = dest_sub
    for parent_oid, filename in self.table.get_parents(oid):
        self.table.remove(parent_oid, filename)
+
def _restore(self, oid):
    """Move an object out of the temp directory back into place."""
    ops = self.ops
    dest = self.get_path(oid)
    src = ops.join(self.basepath, *self._tmp_subpaths[oid])
    if dest == self.basepath:
        self._move_base_contents(src, dest)
    else:
        self._move_item(src, dest)
    del self._tmp_subpaths[oid]
+
def _do_delete(self, oid):
    """Script command: delete an object from the filesystem.

    Also unlinks the object from all of its parents in the OID
    table and forgets any temp-directory record for it.
    """
    ops = self.ops
    path = self.get_path(oid)
    if path == self.basepath:
        # Deleting the root: clear the base directory's contents,
        # keeping the special '_root' and '_tmp' entries.
        for fn in ops.listdir(path):
            if fn not in ('_root', '_tmp'):
                self._rmrf(ops.join(self.basepath, fn))
    else:
        # Deleting an ordinary object.
        if not ops.isdir(path):
            # A file: delete its annotation files too.
            for s in self.afs.get_annotation_paths(path):
                if ops.exists(s):
                    ops.remove(s)
        self._rmrf(path)
    if oid in self._tmp_subpaths:
        del self._tmp_subpaths[oid]
    for parent_oid, filename in self.table.get_parents(oid):
        self.table.remove(parent_oid, filename)
    # XXX Need to garbage collect descendants in the OID table.
+
+
def _do_write_all(self, container_changes):
    """Script command: write all objects.

    Uses multiple passes: each pass writes only the objects whose
    subpath is already known; linking a container during one pass
    can make its children reachable on the next pass.  Objects that
    never become reachable are tossed.

    container_changes: {oid: {filename: child_oid}}
    """
    ops = self.ops
    while self._pending:
        written = 0
        # Note: .items() snapshots the dict (Python 2 list), so the
        # 'del self._pending[oid]' below is safe during iteration.
        for oid, anns in self._pending.items():
            p = self.get_subpath(oid)
            if p is None:
                # Not linked into the object system yet.
                # Try again on the next pass.
                continue
            path = ops.join(self.basepath, *p)
            t = anns[node_type_ann]
            if not ops.exists(path):
                if t == 'd':
                    ops.mkdir(path)
            to_write = {}  # annotations to write next to this node
            for name, value in anns.items():
                if (name == node_type_ann
                    or name == suggested_extension_ann):
                    # Doesn't need to be written.
                    continue
                elif name == data_ann:
                    # The main file body is written directly.
                    data, as_text = value
                    ops.writefile(path, as_text, data)
                elif name == file_list_ann:
                    # Prepare the object_names annotation.
                    object_names = []
                    for objname, child_oid in value:
                        object_names.append(objname)
                    to_write[object_names_ann] = '\n'.join(object_names)
                    # Move objects from the temporary directory.
                    fn_oid = container_changes.get(oid)
                    if fn_oid:
                        for filename, child_oid in fn_oid.items():
                            self.table.add(oid, filename, child_oid)
                            if self._tmp_subpaths.has_key(child_oid):
                                self._restore(child_oid)
                        self.afs.invalidate(path)
                else:
                    # Any other annotation is written as-is.
                    to_write[name] = value
            self.afs.write_annotations(path, to_write)
            self.afs.invalidate(self.ops.dirname(path))
            # This object has been written.
            written += 1
            del self._pending[oid]

        if not written:
            # Nothing was written in this pass.  This means that
            # the rest of the queued objects are not actually
            # linked into the object system.  Toss them.
            if DEBUG:
                tossing = self._pending.keys()
                tossing.sort()
                print "fs: tossing %s" % ', '.join(tossing)
            break
+
+
def _do_link_app(self, app_exists):
    """Script command: link or unlink the application object at the root.

    The application is represented by a directory named
    self.app_filename inside the '_root' directory.
    """
    ops = self.ops
    path = ops.join(self.basepath, '_root', self.app_filename)
    if app_exists:
        # The root has an application; make sure the directory exists.
        if not ops.exists(path):
            ops.makedirs(path)
    elif ops.exists(path):
        # The root does not have an application.  Remove it.
        ops.rmtree(path)
+
+ #
+ # ITPCConnection implementation
+ #
+
def sortKey(self):
    """Returns a stable sort key for this connection (ITPCConnection).

    The base path uniquely identifies the connection.
    """
    return self.basepath
+
def getName(self):
    """Returns a human-readable name for this connection: its base path."""
    return self.basepath
+
def connect(self):
    """Create the base directory and, if needed, the app placeholder.

    When objects already exist at basepath and an application
    filename is configured, ensure '_root' contains an application
    entry as well.
    """
    ops = self.ops
    if not ops.exists(self.basepath):
        ops.makedirs(self.basepath)
    if self.app_filename:
        fn_to_name, name_to_fn = self.afs.compute_contents(self.basepath)
        if fn_to_name:
            self._do_link_app(1)
+
def begin(self):
    # Transaction start: drop the annotation filesystem cache so this
    # transaction reads fresh state.
    self.afs.clear_cache()
+
def vote(self):
    """Do some early verification

    This is done while the transaction can still be vetoed safely.
    """
    # Builds self._script; raises FSWriteError on inconsistent data.
    self._prepare()
    # Mark the transaction as committed-pending so finishWrite /
    # finishCommit will act.
    self._final = 1
+
def abort(self):
    # Discard all queued writes and any prepared commit script.
    self.reset()
+
def finishWrite(self):
    """Executes the commit script prepared by vote().

    Each script step ('name', *args) dispatches to the matching
    '_do_name' method.  Does nothing unless vote() succeeded.
    """
    if not self._final:
        return
    for step in self._script:
        handler = getattr(self, '_do_%s' % step[0])
        handler(*step[1:])
+
def finishCommit(self):
    # Clear transaction state, but only if vote() marked the
    # transaction final.
    if self._final:
        self.reset()
+
def close(self):
    # Drop all connection state.
    self.reset()
Property changes on: Products.Ape/trunk/lib/apelib/fs/connection.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/fileops.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/fileops.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/fileops.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,60 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem I/O abstraction.
+
+$Id$
+"""
+
+import os
+import shutil
+
+
class StandardFileOperations:
    """Standard filesystem interaction implementation.

    Provides the operations needed by FSConnection.  The simple
    operations are stdlib functions bound as instance attributes, so
    individual operations can be substituted (e.g. in tests).
    """

    def __init__(self):
        # Pass-throughs to os / os.path / shutil.
        self.dirname = os.path.dirname
        self.exists = os.path.exists
        self.getmtime = os.path.getmtime
        self.isdir = os.path.isdir
        self.join = os.path.join
        self.listdir = os.listdir
        self.makedirs = os.makedirs
        self.mkdir = os.mkdir
        self.remove = os.remove
        self.rename = os.rename
        self.rmtree = shutil.rmtree
        self.split = os.path.split
        self.splitext = os.path.splitext

    def readfile(self, path, as_text):
        """Returns the contents of the file at 'path'.

        Text mode when 'as_text' is true, binary mode otherwise.
        """
        if as_text:
            mode = 'rt'
        else:
            mode = 'rb'
        f = open(path, mode)
        try:
            return f.read()
        finally:
            f.close()

    def writefile(self, path, as_text, bytes):
        """Writes 'bytes' to the file at 'path'.

        Text mode when 'as_text' is true, binary mode otherwise.
        """
        if as_text:
            mode = 'wt'
        else:
            mode = 'wb'
        f = open(path, mode)
        try:
            f.write(bytes)
        finally:
            f.close()

    def canwrite(self, path):
        """Returns true if the current user may write to 'path'."""
        return os.access(path, os.W_OK)
+
Property changes on: Products.Ape/trunk/lib/apelib/fs/fileops.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/interfaces.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/interfaces.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/interfaces.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,132 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem-specific interfaces
+
+$Id$
+"""
+
+from Interface import Interface
+
+
# Raised by IFSReader implementations when filesystem data cannot be read.
class FSReadError (Exception):
    """Unable to read data"""
+
# Raised by IFSWriter implementations when queued data is inconsistent
# or cannot be written to the filesystem.
class FSWriteError (Exception):
    """Unable to write data"""
+
+
class IFSReader (Interface):
    """Filesystem reader that supports annotations.

    All operations address filesystem nodes by OID rather than by
    path; the implementation maintains the OID-to-path mapping.
    """

    def get_subpath(oid):
        """Returns the tuple path for an oid, relative to the base.
        """

    def get_path(oid):
        """Returns the filesystem path for an oid.
        """

    def read_node_type(oid):
        """Reads the node type of a filesystem node.

        See IFSWriter.write_node_type for the possible values.
        """

    def read_data(oid, allow_missing=0, as_text=0):
        """Reads the main data stream from a file.

        If the allow_missing flag is specified, this method returns
        None if no such file is found.  If as_text is true, the file
        is read in text mode.
        """

    def read_directory(oid, allow_missing=0):
        """Reads the contents of a directory.

        Returns a list of (object_name, child_oid).  The child_oid is
        None for objects not seen before.  The application should
        assign unique OIDs to the newly found children, then tell this
        object about the assignments through the assignNew() method.
        (NOTE(review): assignNew() is not declared on this interface;
        this probably refers to assign_existing() below -- confirm.)

        If the allow_missing flag is specified, this method returns
        None if no such directory is found.
        """

    def read_annotation(oid, name, default=None):
        """Reads a text-based annotation for a file.

        Returns 'default' when the annotation is absent.
        """

    def read_object_name(oid):
        """Gets the canonical name for an object.

        Note that this only makes sense when objects can have only one
        parent.
        """

    def assign_existing(oid, children):
        """Assigns OIDs to previously existing objects on the filesystem.

        See read_directory().  children is a list of (object_name, child_oid).
        """

    def read_extension(oid):
        """Returns the filename extension for a file.
        """

    def read_mod_time(oid, default=0):
        """Returns the last-modified time of a file.

        Returns 'default' when the time cannot be determined.
        """

    def get_sources(oid):
        """Returns source information for an oid.

        The source information is a mapping that maps
        (source_repository, path) to a state object.  The contents of
        path and state are specific to a source repository.  The
        source repository (an ISourceRepository) may be polled
        periodically to freshen the state of objects in caches.
        """
+
+
class IFSWriter (Interface):
    """Filesystem writer that supports annotations.

    Writes are queued and take effect when the containing
    transaction commits.
    """

    def write_node_type(oid, data):
        """Writes the node type for a filesystem node.

        'd' (directory) and 'f' (file) are supported.
        """

    def write_data(oid, data, as_text=0):
        """Writes string data to a filesystem node.

        If 'as_text' is true, the file is written in text mode.
        """

    def write_directory(oid, data):
        """Writes data to a directory.

        'data' is a sequence of (object_name, child_oid).
        """

    def write_annotation(oid, name, data):
        """Writes a text-based annotation for a filesystem node.
        """

    def suggest_extension(oid, ext):
        """Suggests a filename extension for a filesystem node.

        The IFSConnection may use this information to store the file
        with an automatically appended filename extension.
        """
Property changes on: Products.Ape/trunk/lib/apelib/fs/interfaces.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/oidtable.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/oidtable.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/oidtable.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,133 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""OIDTable class.
+
+$Id$
+"""
+
class OIDTable:
    """Table of (parent_oid, filename, child_oid).

    The oid and filename columns form the primary key.  Maintains an
    index on the child_oid column to allow fast reverse lookups.
    """

    def __init__(self):
        self.fwd = {}   # { parent_oid : {filename : child_oid} }
        self.back = {}  # { child_oid : [(parent_oid, filename)] }

    def add(self, parent_oid, filename, child_oid):
        """Adds an association from a parent and filename to a child.

        Raises KeyError if the parent already has a different child
        under the same filename.  Adding an existing association is
        a no-op.
        """
        d = self.fwd.get(parent_oid)
        if d is None:
            d = {}
            self.fwd[parent_oid] = d
        if filename in d:
            if d[filename] != child_oid:
                raise KeyError(
                    "'%s' already has a child named '%s', with OID '%s'"
                    % (parent_oid, filename, d[filename]))
        else:
            d[filename] = child_oid
        p = self.back.get(child_oid)
        key = (parent_oid, filename)
        if p is None:
            self.back[child_oid] = [key]
        elif key not in p:
            p.append(key)

    def remove(self, parent_oid, filename):
        """Removes an association between a parent and a child.

        Quietly ignores associations that do not exist.  Empty index
        entries are deleted to keep the table compact.
        """
        d = self.fwd.get(parent_oid)
        if not d:
            return
        child_oid = d.get(filename)
        if not child_oid:
            return
        del d[filename]
        if not d:
            del self.fwd[parent_oid]
        p = self.back.get(child_oid)
        key = (parent_oid, filename)
        if key in p:
            p.remove(key)
        if not p:
            del self.back[child_oid]

    def set_children(self, parent_oid, new_children):
        """Updates all children for a parent.

        new_children is {filename: child_oid}.  Calls self.add() and
        self.remove() to make all changes.
        """
        old_children = self.fwd.get(parent_oid)
        if old_children is not None:
            # The dictionary in the table will change as children are
            # added/removed, so make a copy.
            old_children = old_children.copy()
        else:
            old_children = {}
        for filename, child_oid in new_children.items():
            if filename in old_children:
                if old_children[filename] != child_oid:
                    # Re-point this filename at a new OID.
                    self.remove(parent_oid, filename)
                    self.add(parent_oid, filename, child_oid)
                del old_children[filename]
            else:
                # Add a new child.
                self.add(parent_oid, filename, child_oid)
        # Remove the filenames left over in old_children.
        for filename, child_oid in old_children.items():
            self.remove(parent_oid, filename)

    def get_path(self, ancestor_oid, descendant_oid):
        """Returns the primary path that connects two OIDs.

        The primary path follows the first parent of each OID.
        Returns None when the primary lineage never reaches the
        ancestor, including when the lineage contains a cycle.
        (Since only first parents are followed, revisiting any OID
        means the chain can never terminate.)
        """
        parts = []
        seen = {descendant_oid: 1}
        oid = descendant_oid
        while oid != ancestor_oid:
            p = self.back.get(oid)
            if not p:
                # The primary lineage doesn't reach the ancestor.
                return None
            # Follow only the first parent.
            oid, filename = p[0]
            if oid in seen:
                # Circular OID chain.
                return None
            seen[oid] = 1
            parts.append(filename)
        parts.reverse()
        return parts

    def get_children(self, parent_oid):
        """Returns the children of an OID as a mapping of {filename: oid}.

        Returns None for an unknown parent.  Do not modify the return
        value.
        """
        return self.fwd.get(parent_oid)

    def get_parents(self, child_oid):
        """Returns the parents of an OID as a list of (oid, filename).

        Returns None for an unknown child.  Do not modify the return
        value.
        """
        return self.back.get(child_oid)
Property changes on: Products.Ape/trunk/lib/apelib/fs/oidtable.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/params.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/params.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/params.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,95 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Functions for encoding/decoding parameter strings.
+
+$Id$
+"""
+
+import re
+
# Separator tokens: a quote, space, tab, or a two-character backslash
# escape.  The capturing group makes re.split keep the separators.
token_re = re.compile(r'([" \t]|\\["\\trn])')

# Decoded value of each recognized escape sequence.
token_replacements = {
    '\\"': '"',
    '\\\\': '\\',
    '\\t': '\t',
    '\\r': '\r',
    '\\n': '\n',
    }

# Legal parameter-name syntax for params_to_string().
key_re = re.compile(r'[A-Za-z_-][A-Za-z0-9_-]*$')


def split_params(s):
    """Splits 's' into parameters on unquoted whitespace.

    Double quotes group words into a single parameter, and backslash
    escapes (for quote, backslash, tab, CR, LF) are decoded.
    """
    pieces = re.split(token_re, s)
    params = []
    current = []
    quoting = 0
    for tok in pieces:
        if not tok:
            continue
        decoded = token_replacements.get(tok)
        if decoded:
            current.append(decoded)
        elif not quoting and tok in (' ', '\t'):
            # Unquoted whitespace ends the current parameter.
            if current:
                params.append(''.join(current))
                current = []
        elif tok == '"':
            quoting = not quoting
            if not quoting:
                # A closing quote always ends the parameter.
                params.append(''.join(current))
                current = []
        else:
            current.append(tok)
    leftover = ''.join(current).strip()
    if leftover:
        params.append(leftover)
    return params
+
+
def escape_param(s):
    """Backslash-escapes the characters that split_params() decodes."""
    for plain, escaped in (('\\', '\\\\'), ('"', '\\"'),
                           ('\r', '\\r'), ('\n', '\\n'), ('\t', '\\t')):
        s = s.replace(plain, escaped)
    return s
+
+
def string_to_params(s):
    """Decodes a string of the format 'a="..." b="..."'.

    Returns a list of (key, value) pairs.  A bare key (no '=') gets
    an empty value.
    """
    res = []
    for param in split_params(s):
        parts = param.split('=', 1)
        if len(parts) == 1:
            res.append((parts[0], ''))
        else:
            res.append((parts[0], parts[1]))
    return res
+
+
def params_to_string(params):
    """Encodes a list of (key, value) pairs as a string.

    Raises ValueError for keys that are not legal parameter names.
    """
    parts = []
    for k, v in params:
        if not key_re.match(k):
            raise ValueError('Bad parameter name: %s' % repr(k))
        if v:
            parts.append('%s="%s"' % (k, escape_param(v)))
        else:
            parts.append(k)
    return ' '.join(parts)
+
Property changes on: Products.Ape/trunk/lib/apelib/fs/params.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/properties.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/properties.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/properties.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,128 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Filesystem property gateways.
+
+$Id$
+"""
+
+from types import StringType
+
+from apelib.core.interfaces import IGateway
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+
+from base import FSGatewayBase
+
+
# Decoded value of each escape sequence understood by unescape_string().
token_replacements = {
    '\\\\': '\\',
    '\\t': '\t',
    '\\r': '\r',
    '\\n': '\n',
    }


def escape_string(s):
    """Backslash-escapes backslashes and whitespace control characters."""
    for plain, escaped in (('\\', '\\\\'), ('\n', '\\n'),
                           ('\r', '\\r'), ('\t', '\\t')):
        s = s.replace(plain, escaped)
    return s


def unescape_string(s):
    """Reverses escape_string().

    Unknown backslash sequences are passed through unchanged.
    """
    res = []
    pos = 0
    while 1:
        p = s.find('\\', pos)
        if p < 0:
            res.append(s[pos:])
            break
        res.append(s[pos:p])
        token = s[p:p + 2]
        # Decode the two-character escape, or keep it verbatim.
        res.append(token_replacements.get(token, token))
        pos = p + 2
    return ''.join(res)
+
+
class FSProperties (FSGatewayBase):
    """Simple properties to filesystem properties annotation gateway.

    Each property is stored as one 'id:type=value' line in the
    annotation, with the value backslash-escaped.
    """

    __implements__ = IGateway

    schema = RowSequenceSchema()
    schema.add('id', 'string', 1)
    schema.add('type', 'string')
    schema.add('data', 'string')

    def __init__(self, annotation='properties', conn_name='fs'):
        self.annotation = str(annotation)
        FSGatewayBase.__init__(self, conn_name)

    def load(self, event):
        """Parses the annotation; returns (state, serial)."""
        fs_conn = self.get_connection(event)
        text = fs_conn.read_annotation(event.oid, self.annotation, '')
        res = []
        if text:
            for line in text.split('\n'):
                if '=' not in line:
                    continue
                k, v = line.split('=', 1)
                if ':' in k:
                    k, t = k.split(':', 1)
                else:
                    # Untyped properties default to strings.
                    t = 'string'
                res.append((k, t, unescape_string(v)))
        res.sort()
        return res, tuple(res)

    def store(self, event, state):
        """Writes the annotation; returns the (sorted) serial."""
        lines = ['%s:%s=%s' % (k, t, escape_string(v)) for k, t, v in state]
        lines.sort()
        fs_conn = self.get_connection(event)
        fs_conn.write_annotation(event.oid, self.annotation, '\n'.join(lines))
        serial = list(state)
        serial.sort()
        return tuple(serial)
+
+
class FSAnnotationData (FSGatewayBase):
    """Text to filesystem property annotation gateway.

    Stores a bare (stripped) string state in one named annotation.
    """

    __implements__ = IGateway

    schema = ColumnSchema('data', 'string')

    def __init__(self, annotation, conn_name='fs'):
        self.annotation = str(annotation)
        FSGatewayBase.__init__(self, conn_name)

    def load(self, event):
        """Reads the annotation; returns (state, serial)."""
        conn = self.get_connection(event)
        state = conn.read_annotation(event.oid, self.annotation, '').strip()
        return state, state

    def store(self, event, state):
        """Writes the annotation (only when non-empty); returns the serial."""
        if not isinstance(state, StringType):
            raise ValueError('Not a string: %s' % repr(state))
        state = state.strip()
        if state:
            conn = self.get_connection(event)
            conn.write_annotation(event.oid, self.annotation, state)
        return state
Property changes on: Products.Ape/trunk/lib/apelib/fs/properties.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/security.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/security.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/security.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,187 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Gateways for storing security information.
+
+$Id$
+"""
+
+from apelib.core.interfaces import IGateway, MappingError
+from apelib.core.schemas import RowSequenceSchema
+from params import string_to_params, params_to_string
+
+from base import FSGatewayBase
+
+
class FSSecurityAttributes (FSGatewayBase):
    """Gateway for storing security attributes.

    Each declaration is one annotation line of the form:
    decl_type [role="..."] [permission="..."] [username="..."]
    """

    __implements__ = IGateway

    schema = RowSequenceSchema()
    schema.add('declaration_type', 'string')
    schema.add('role', 'string')
    schema.add('permission', 'string')
    schema.add('username', 'string')

    def __init__(self, annotation='security', conn_name='fs'):
        self.annotation = annotation
        FSGatewayBase.__init__(self, conn_name)

    def load(self, event):
        """Parses the declarations; returns (state, serial)."""
        fs_conn = self.get_connection(event)
        text = fs_conn.read_annotation(event.oid, self.annotation, '')
        res = []
        if text:
            for line in text.split('\n'):
                line = line.strip()
                if not line or line.startswith('#'):
                    continue  # blank line or comment
                params = string_to_params(line)
                if not params:
                    continue
                # First parameter is the declaration type.
                row = [params[0][0], '', '', '']
                for k, v in params[1:]:
                    k = k.lower()
                    if '_' in k:
                        # temporary backward compatibility
                        k = k.split('_', 1)[0]
                    if k == 'role':
                        row[1] = v
                    elif k == 'permission':
                        row[2] = v
                    elif k == 'username':
                        row[3] = v
                    else:
                        raise ValueError(
                            "Could not read security declaration "
                            "%s for %s" % (repr(line), repr(event.oid)))
                res.append(tuple(row))
        res.sort()
        return res, tuple(res)

    def store(self, event, state):
        """Writes the declarations (when any); returns the sorted serial."""
        lines = []
        for d, r, p, u in state:
            params = [(d, '')]
            if r:
                params.append(('role', r))
            if p:
                params.append(('permission', p))
            if u:
                params.append(('username', u))
            lines.append(params_to_string(params))
        if lines:
            lines.sort()
            fs_conn = self.get_connection(event)
            fs_conn.write_annotation(
                event.oid, self.annotation, '\n'.join(lines))
        serial = list(state)
        serial.sort()
        return tuple(serial)
+
+
+
class FSUserList (FSGatewayBase):
    """User list gateway, where the user list is stored in a flat file.

    Each user is one 'id:password:role,role:domain,domain' line; lines
    starting with '#' are comments.
    """

    __implements__ = IGateway

    schema = RowSequenceSchema()
    schema.add('id', 'string', 1)
    schema.add('password', 'string')
    schema.add('roles', 'string:list')
    schema.add('domains', 'string:list')

    def load(self, event):
        """Parses the user file.

        Returns (state, serial): state is a sorted list of
        (id, password, roles, domains); the serial is the raw text.
        """
        c = self.get_connection(event)
        assert c.read_node_type(event.oid) == 'f'
        text = c.read_data(event.oid)
        res = []
        for line in text.split('\n'):
            L = line.strip()
            # A user line is non-comment and always contains a colon.
            if not L.startswith('#') and ':' in L:
                id, password, rolelist, domainlist = L.split(':', 3)
                roles = self._split_list(rolelist)
                domains = self._split_list(domainlist)
                res.append((id, password, roles, domains))
        res.sort()
        return res, text

    def _split_list(self, s):
        # Decode a comma-separated list, dropping empty items.
        return tuple([item.strip() for item in s.split(',') if item])

    def _join_list(self, items):
        # Encode a list of roles or domains, validating each item.
        for item in items:
            if item.strip() != item:
                raise MappingError(
                    "Leading and trailing whitespace are not allowed "
                    "in roles and domains")
            if not item:
                raise MappingError("Empty role or domain not allowed")
            if ',' in item or ':' in item or '\n' in item:
                raise MappingError(
                    "Commas, colons, and newlines are not allowed "
                    "in roles and domains")
        return ','.join(items)

    def store(self, event, state):
        """Writes the user file, maintaining the current line order.

        Existing users keep their position and are updated in place,
        users absent from 'state' are removed, and new users are
        appended at the end.  Returns the written text as the serial.

        Raises MappingError for IDs, passwords, roles, or domains
        containing forbidden characters.
        """
        replace_lines = {}  # {id: encoded user line}
        for id, password, roles, domains in state:
            if ':' in id or '\n' in id:
                raise MappingError('User IDs cannot have colons or newlines')
            if id.startswith('#'):
                raise MappingError('User IDs cannot start with #')
            if ':' in password or '\n' in password:
                raise MappingError('Passwords cannot have colons or newlines')
            rolelist = self._join_list(roles)
            domainlist = self._join_list(domains)
            replace_lines[id] = '%s:%s:%s:%s' % (
                id, password, rolelist, domainlist)
        oid = event.oid
        fs_conn = self.get_connection(event)
        fs_conn.write_node_type(oid, 'f')
        # Read the existing text only to maintain the current order.
        text = fs_conn.read_data(oid, allow_missing=1)
        if text is None:
            text = ''
        new_lines = []
        # Replace / remove users.
        # NOTE(review): comment lines are dropped on rewrite here;
        # possibly unintended, preserved for compatibility.
        for line in text.split('\n'):
            L = line.strip()
            if not L.startswith('#'):
                if ':' in L:
                    name, stuff = L.split(':', 1)
                    replace = replace_lines.get(name, '')
                    if replace:
                        # Keep this user in its current position,
                        # updated in place.  (Unchanged users used to
                        # be dropped here and re-appended at the end,
                        # losing their position.)
                        new_lines.append(replace)
                        del replace_lines[name]
                    # else: the user is gone; drop the line.
                else:
                    new_lines.append(line)
        # Append new users.
        for line in replace_lines.values():
            new_lines.append(line)
        # Write it.
        text = '\n'.join(new_lines)
        fs_conn.write_data(oid, text)
        return text
Property changes on: Products.Ape/trunk/lib/apelib/fs/security.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/fs/structure.py
===================================================================
--- Products.Ape/trunk/lib/apelib/fs/structure.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/fs/structure.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,154 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Basic filesystem gateways.
+
+$Id$
+"""
+
+from types import StringType
+
+from apelib.core.interfaces import IGateway, LoadError
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+
+from base import FSGatewayBase
+
+
class FSFileData (FSGatewayBase):
    """File data gateway, where data is a string."""

    __implements__ = IGateway

    schema = ColumnSchema('data', 'string')

    def __init__(self, text=0, conn_name='fs'):
        # Accept the literal strings 'text' / 'binary' as aliases for
        # the boolean flag.
        if text == 'text':
            text = 1
        elif text == 'binary':
            text = 0
        self.text = text
        FSGatewayBase.__init__(self, conn_name)

    def load(self, event):
        conn = self.get_connection(event)
        assert conn.read_node_type(event.oid) == 'f'
        data = conn.read_data(event.oid, as_text=self.text)
        # The data string doubles as its own hash value.
        return data, data

    def store(self, event, state):
        if not isinstance(state, StringType):
            raise ValueError('Not a string: %s' % repr(state))
        conn = self.get_connection(event)
        conn.write_node_type(event.oid, 'f')
        conn.write_data(event.oid, state, as_text=self.text)
        return state
+
+
class FSAutoId (FSGatewayBase):
    """Automatic ID gateway based on the object name in the primary parent."""

    __implements__ = IGateway

    schema = ColumnSchema('id', 'string')

    def load(self, event):
        conn = self.get_connection(event)
        name = conn.read_object_name(event.oid)
        # Return None as the hash value to disable conflict checking.
        return name, None

    def store(self, event, state):
        # The ID comes from the parent directory entry; nothing to store.
        return None

    def get_sources(self, event):
        conn = self.get_connection(event)
        return conn.get_sources(event.oid)
+
+
class FSDirectoryItems (FSGatewayBase):
    """Read/write objects in a filesystem directory."""

    __implements__ = IGateway

    schema = RowSequenceSchema()
    schema.add('key', 'string', 1)
    schema.add('oid', 'string')
    schema.add('classification', 'classification')

    def load(self, event):
        conn = self.get_connection(event)
        if conn.read_node_type(event.oid) != 'd':
            raise LoadError("Not a directory")
        entries = list(conn.read_directory(event.oid))
        entries.sort()
        # Generate OIDs for subobjects found on the filesystem that
        # have no recorded OID yet.
        assigned = {}
        for objname, child_oid in entries:
            if child_oid is None:
                assigned[objname] = event.conf.oid_gen.new_oid(event)
        if assigned:
            # Tell the connection what OIDs those objects received.
            conn.assign_existing(event.oid, assigned.items())
        # Build the result rows and the hash value.
        res = []
        hash_value = []
        for objname, child_oid in entries:
            if child_oid is None:
                child_oid = assigned[objname]
            classification = event.classify(child_oid)
            res.append((objname, child_oid, classification))
            hash_value.append((objname, child_oid))
        return res, tuple(hash_value)

    def store(self, event, state):
        conn = self.get_connection(event)
        conn.write_node_type(event.oid, 'd')
        entries = [(objname, child_oid)
                   for (objname, child_oid, _cls) in state]
        entries.sort()
        conn.write_directory(event.oid, entries)
        return tuple(entries)
+
+
class FSModTime (FSGatewayBase):
    """Reads the modification time of a file."""

    __implements__ = IGateway

    schema = ColumnSchema('mtime', 'int')

    def load(self, event):
        conn = self.get_connection(event)
        # Return None as the hash value (see store()).
        return long(conn.read_mod_time(event.oid)), None

    def store(self, event, state):
        # Under normal circumstances there is no need to change the
        # mod time of a file.  Ignore by returning None as the hash.
        return None
+
+
def root_mapping():
    """Returns a gateway suitable for storing the root persistent mapping."""
    from apelib.core.gateways import CompositeGateway
    from properties import FSAnnotationData
    gw = CompositeGateway()
    gw.add('references', FSDirectoryItems())
    gw.add('others', FSAnnotationData('others'))
    return gw
Property changes on: Products.Ape/trunk/lib/apelib/fs/structure.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,17 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""RDBMS gateway package
+
+$Id$
+"""
Property changes on: Products.Ape/trunk/lib/apelib/sql/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/classification.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/classification.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/classification.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,57 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL classification gateway
+
+$Id$
+"""
+
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+from apelib.core.interfaces import OIDConflictError
+from sqlbase import SQLGatewayBase
+
+
class SQLClassification (SQLGatewayBase):
    """Stores object classifications in a two-column SQL table."""

    __implements__ = SQLGatewayBase.__implements__

    schema = ColumnSchema('classification', 'classification')
    table_name = 'classification'
    table_schema = RowSequenceSchema()
    table_schema.add('class_name', 'string', 0)
    table_schema.add('mapper_name', 'string', 0)

    def load(self, event):
        table = self.get_table(event)
        rows = table.select(self.column_names, oid=event.oid)
        if not rows:
            raise KeyError(event.oid)
        rec = rows[0]
        classification = {}
        if rec[0]:
            classification['class_name'] = rec[0]
        if rec[1]:
            classification['mapper_name'] = rec[1]
        # The raw record doubles as the hash value.
        return classification, rec

    def store(self, event, classification):
        conn = self.get_connection(event)
        table = self.get_table(event)
        row = (classification.get('class_name', ''),
               classification.get('mapper_name', ''))
        try:
            table.set_one(event.oid, self.column_names, row, event.is_new)
        except conn.module.DatabaseError:
            # A duplicate-key failure means another transaction
            # claimed this OID.
            raise OIDConflictError(event.oid)
        return row
Property changes on: Products.Ape/trunk/lib/apelib/sql/classification.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/dbapi.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/dbapi.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/dbapi.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,337 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL database connections via DB-API 2.0.
+
+$Id$
+"""
+
+import os
+import re
+from time import time
+
+from apelib.core.interfaces import ITPCConnection
+from apelib.core.schemas import ColumnSchema
+from apelib.sql.interfaces import ISQLConnection, IRDBMSColumn
+from apelib.sql.table import SQLTable
+
+name_style_re = re.compile(':[A-Za-z0-9_-]+')
+
+DEBUG = os.environ.get('APE_DEBUG_SQL')
+PROFILE = os.environ.get('APE_PROFILE_SQL')
+
+
class AbstractSQLConnection:
    """Generic DB-API 2.0 connection.

    Implements ISQLConnection (query execution with automatic
    parameter style conversion) and ITPCConnection (two-phase commit
    participation).  Vendor-specific subclasses override the abstract
    catalog and sequence methods.
    """

    __implements__ = ISQLConnection, ITPCConnection

    # factories by column name take precedence over factories by column type.
    column_factories_by_name = {} # { local col name -> column factory }
    column_factories_by_type = {} # { local type name -> column factory }
    column_name_translations = {} # { local col name -> db col name }
    column_type_translations = {} # { local type name -> db type name }
    module = None     # the imported DB-API module
    connector = None  # the shared DB-API connection, set by connect()

    def __init__(self, module_name, connect_expression, prefix=''):
        """Imports the DB-API module and records connection parameters.

        'connect_expression' is a Python expression evaluated later by
        connect(); the only name available to it is 'connect' (the
        module's connect function).  'prefix' is prepended to all
        table and sequence names.
        """
        # connect_expression is a Python expression.
        self.module_name = module_name
        self.module = __import__(module_name, {}, {}, ('__doc__',))
        if not hasattr(self.module, "connect"):
            raise ImportError(
                "Module '%s' does not have a 'connect' method." % module_name)
        self.connect_expression = connect_expression
        self.prefix = prefix
        self.connector = None
        self.transaction_started = False
        self._tables = {}  # { local table name -> SQLTable }
        self._final = 0    # true between vote() and finishCommit()

    def __repr__(self):
        return '<%s(module_name=%s)>' % (
            self.__class__.__name__, repr(self.module_name))

    #
    # IRDBMSConnection implementation.
    #

    def define_table(self, name, schema):
        """Creates and returns an IRDBMSTable.

        Builds a database-specific column object for each column of
        the schema, applying the class's name/type translations, and
        remembers the table for get_table().
        """
        table = SQLTable(self, self.prefix + name)
        for c in schema.get_columns():
            # Prefer a by-name factory, then by-type, then the default.
            factory = self.column_factories_by_name.get(c.name)
            if factory is None:
                factory = self.column_factories_by_type.get(c.type)
            if factory is None:
                factory = RDBMSColumn
            dbc = factory(self, c)
            n = self.column_name_translations.get(c.name)
            if n is not None:
                dbc.name = n
            t = self.column_type_translations.get(c.type)
            if t is not None:
                dbc.type = t
            table.add_column(c.name, dbc)
        self._tables[name] = table
        return table

    def get_table(self, name):
        """Returns a previously defined IRDBMSTable."""
        return self._tables[name]

    def exists(self, name, type_name):
        """Returns true if the specified database object exists.

        type_name is 'table' or 'sequence'
        """
        raise NotImplementedError("Abstract Method")

    def list_table_names(self):
        """Returns a list of existing table names.
        """
        raise NotImplementedError("Abstract Method")

    def create_sequence(self, name, start=1):
        """Creates a sequence.
        """
        raise NotImplementedError("Abstract Method")

    def reset_sequence(self, name, start=1):
        """Resets a sequence.
        """
        raise NotImplementedError("Abstract Method")

    def increment(self, name):
        """Increments a sequence.
        """
        raise NotImplementedError("Abstract Method")

    def clear_table(self, name):
        """Removes all rows from a table.
        """
        self.execute('DELETE FROM %s' % (self.prefix + name))

    def execute(self, sql, kw=None, fetch=False):
        """Executes a SQL statement, optionally returning fetched rows.

        'sql' uses ':name' style parameters, which are converted to
        the DB-API module's declared paramstyle.  'kw' maps parameter
        names to values.  If 'fetch' is true, all result rows are
        fetched and returned as a list.
        """
        if self.connector is None:
            raise RuntimeError('Not connected')
        converter = style_converters[self.module.paramstyle]
        sql, param_names = converter(sql)
        if param_names is None:
            # The query expects keyword parameters.
            params = kw or {}
        else:
            # The query expects positional parameters.
            if not param_names:
                params = ()
            else:
                params = tuple([kw[n] for n in param_names])
        self.transaction_started = True
        cursor = self.connector.cursor()
        try:
            if DEBUG or PROFILE:
                print 'SQL: %s, %s' % (repr(sql), params)
            if PROFILE:
                # Time the statement and report its duration.
                start = time()
                cursor.execute(sql, params)
                end = time()
                print 'SQL time: %0.6fs' % (end - start)
            else:
                if not params:
                    cursor.execute(sql)
                else:
                    cursor.execute(sql, params)
            if fetch:
                res = list(cursor.fetchall())
                if DEBUG:
                    print 'SQL result: %s' % repr(res)
                return res
        finally:
            cursor.close()

    #
    # ITPCConnection implementation.
    #

    def connect(self):
        """Opens the shared DB-API connection.

        NOTE(review): connect_expression is eval'd here, so it must
        come from trusted configuration only.
        """
        d = {'connect': self.module.connect}
        self.connector = eval(self.connect_expression, d)

    def sortKey(self):
        # Determines commit ordering among transaction participants.
        return repr(self)

    def getName(self):
        return repr(self)

    def begin(self):
        pass

    def vote(self):
        # After a successful vote, finishCommit() may commit.
        self._final = 1

    def reset(self):
        self._final = 0
        self.transaction_started = False

    def abort(self):
        try:
            if DEBUG:
                print 'SQL ROLLBACK'
            self.connector.rollback()
        finally:
            self.reset()

    def finishWrite(self):
        pass

    def finishCommit(self):
        # Commit only if vote() succeeded for this transaction.
        if self._final:
            try:
                if DEBUG:
                    print 'SQL COMMIT'
                self.connector.commit()
            finally:
                self.reset()

    def close(self):
        c = self.connector
        if c is not None:
            self.connector = None
            c.close()
+
# Converters for all parameter styles defined by DB-API 2.0.
# Each converter returns translated SQL and a list of positional
# parameter names.  A None parameter list means the statement expects
# a dictionary of keyword parameters rather than a tuple.

style_converters = {}

def convert_to_qmark(sql):
    # '?' format
    names = []
    def sub(match):
        names.append(match.group()[1:])
        return '?'
    return name_style_re.sub(sub, sql), names
style_converters['qmark'] = convert_to_qmark

def convert_to_numeric(sql):
    # ':1' format
    names = []
    def sub(match):
        names.append(match.group()[1:])
        return ':%d' % (len(names) - 1)
    return name_style_re.sub(sub, sql), names
style_converters['numeric'] = convert_to_numeric

def convert_to_named(sql):
    # ':name' format: the input format is already the output format.
    return sql, None
style_converters['named'] = convert_to_named

def convert_to_format(sql):
    # '%s' format
    names = []
    def sub(match):
        names.append(match.group()[1:])
        return '%s'
    return name_style_re.sub(sub, sql), names
style_converters['format'] = convert_to_format

def convert_to_pyformat(sql):
    # '%(name)s' format
    def sub(match):
        return '%%(%s)s' % match.group()[1:]
    return name_style_re.sub(sub, sql), None
style_converters['pyformat'] = convert_to_pyformat
+
+
+
+# RDBMS column implementations.
+
class RDBMSColumn(ColumnSchema):
    """Basic RDBMS column.  Does no type translation."""
    __implements__ = IRDBMSColumn

    use_conversion = False

    def __init__(self, connection, column):
        # Copy the generic column description.  'connection' is
        # unused here but kept for factory-signature compatibility.
        self.name = column.name
        self.type = column.type
        self.unique = column.unique

    def from_db(self, value):
        # Identity conversion.
        return value

    def to_db(self, value):
        # Identity conversion.
        return value
+
+
class IntColumn(RDBMSColumn):
    """RDBMS column that stores as integers."""
    __implements__ = IRDBMSColumn

    use_conversion = True

    def to_db(self, value):
        # Coerce to a Python int for the driver.
        return int(value)

    def from_db(self, value):
        # Application-level values are kept as strings (e.g. OIDs).
        return str(value)
+
+
class LongColumn(RDBMSColumn):
    """RDBMS column that stores as long integers."""
    __implements__ = IRDBMSColumn

    use_conversion = True

    def to_db(self, value):
        # Coerce to a Python long for the driver.
        return long(value)

    def from_db(self, value):
        # Application-level values are kept as strings.
        return str(value)
+
+
class BlobColumn (RDBMSColumn):
    """RDBMS column that stores Binary objects."""
    __implements__ = IRDBMSColumn

    use_conversion = True

    def __init__(self, connection, column):
        RDBMSColumn.__init__(self, connection, column)
        # Bind the driver's Binary wrapper once at setup time.
        self.Binary = connection.module.Binary

    def to_db(self, value):
        return self.Binary(value)

    def from_db(self, value):
        converter = getattr(value, 'tostring', None)
        if converter is not None:
            # possibly an array (see Python's array module)
            return converter()
        return str(value)
+
+
# Set up default column types.  An 'oid' column is always stored as an
# integer, and 'blob' columns are wrapped with the driver's Binary type.
AbstractSQLConnection.column_factories_by_name['oid'] = IntColumn
AbstractSQLConnection.column_factories_by_type['blob'] = BlobColumn
Property changes on: Products.Ape/trunk/lib/apelib/sql/dbapi.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/ingres.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/ingres.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/ingres.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,182 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Ingres-specific database connection.
+
+$Id$
+"""
+
+from apelib.sql import dbapi
+
class IngresConnection (dbapi.AbstractSQLConnection):
    """SQL connection for the Ingres RDBMS.

    Overrides the abstract catalog, sequence, and table-creation
    methods of AbstractSQLConnection with Ingres-specific SQL.

    History:
        1-Jul-2004 - (emma.mcgrattan at ca.com)
            created
        14-Jul-2004 - (grant.croker at ca.com)
            modified IngresConnection.exists() to work with
            paramstyle=qmark
        14-Jul-2004 - (grant.croker at ca.com)
            subclassed create_table into IngresConnection to make use
            of Performance features of Ingres.
        15-Jul-2004 - (grant.croker at ca.com)
            Corrected Syntax of "MODIFY TABLE ... "
            Corrected parameter passing (Changed '?' to 'table_name')
        18-Jul-2004 - (srisu02 at ca.com)
            Corrected Syntax for sequence increments
            Corrected Syntax for sequence fetch() i.e added fetch=1
            as a parameter
        22-Jul-2004 - (srisu02 at ca.com)
            Integrated changes from dbapi.py
            Made change for cache size while creating sequences
    """

    # Generic type name -> Ingres type name.
    column_type_translations = {
        'long': 'bigint',
        'string': 'varchar(255)',
        'datetime': 'time',
        'boolean': 'tinyint',
        }

    # Generic column name -> Ingres column name.
    column_name_translations = {
        'oid': 'objoid',
        }

    # Copy the factory registries so the Ingres-specific factories
    # registered below do not leak into other connection classes.
    column_factories_by_name = (
        dbapi.AbstractSQLConnection.column_factories_by_name.copy())

    column_factories_by_type = (
        dbapi.AbstractSQLConnection.column_factories_by_type.copy())

    def exists(self, name, type_name):
        """Returns true if the specified database object exists.

        type_name is 'table' or 'sequence'
        """
        table_name = self.prefix + name
        if type_name == 'table':
            sql = ('SELECT table_name FROM iitables '
                   'WHERE table_name = :name')
        elif type_name == 'sequence':
            sql = ("SELECT seq_name FROM iisequences "
                   "WHERE seq_name = :name")
        else:
            raise ValueError(type_name)
        # The Ingres catalogs store names in lower case.
        rows = self.execute(sql, {'name': table_name.lower()}, fetch=1)
        return len(rows)

    def list_table_names(self):
        """Returns a list of existing table names (without the prefix)."""
        sql = 'SELECT table_name FROM iitables'
        rows = self.execute(sql, {}, fetch=1)
        res = []
        for (name,) in rows:
            if not self.prefix or name.startswith(self.prefix):
                res.append(name[len(self.prefix):])
        return res

    def create_sequence(self, name, start=1):
        """Creates a sequence.

        A cache of 500 values reduces catalog updates while
        allocating sequence values.
        """
        sql = "CREATE SEQUENCE %s START WITH %d CACHE 500" % (
            self.prefix + name, start)
        self.execute(sql)

    def reset_sequence(self, name, start=1):
        """Resets a sequence so it restarts at 'start'."""
        sql = "ALTER SEQUENCE %s RESTART WITH %d" % (
            self.prefix + name, start)
        self.execute(sql)

    def increment(self, name):
        """Increments a sequence and returns the fetched value."""
        sql = "SELECT NEXT VALUE FOR %s" % (self.prefix + name)
        rows = self.execute(sql, fetch=1)
        return rows[0][0]

    def create_table(self, table, column_defs):
        """Creates a table using Ingres performance features.

        'column_defs' is a sequence of (name, type, unique) triples.

        NOTES
        -----
        Ingres supports 4 table structures.  Depending on the key
        some are more preferable than others.  HEAP and ISAM are
        being ruled out on performance and maintenance grounds.
        BTREE is normally the best catch-all solution but
        suffers when the key is sequentially increasing.  HASH is good
        for one-hit lookups but can require a more frequent
        maintenance routine.

        The page size of the tables created is controlled by the
        ingres_page_size variable.  Valid values are: 2048, 4096,
        8192, 16384, 32768 and 65536.

        NOTE(review): translate_name() and translate_type() are not
        defined by AbstractSQLConnection in dbapi.py; confirm that the
        deployed base class provides them.
        """
        ingres_page_size = 8192
        ingres_table_structure = "BTREE"
        table_name = self.prefix + table
        cols = []
        indexes = []
        for name, typ, unique in column_defs:
            col = self.translate_name(name)
            db_type = self.translate_type(typ)
            constraints = ''
            if unique:
                constraints = ' NOT NULL'
                indexes.append(col)
            cols.append("%s %s%s" % (col, db_type, constraints))
        sql = "CREATE TABLE %s (%s)" % (table_name, ', '.join(cols))
        self.execute(sql)
        if indexes:
            sql = "MODIFY %s TO %s UNIQUE ON %s WITH PAGE_SIZE=%d" % (
                table_name, ingres_table_structure, ', '.join(indexes),
                ingres_page_size)
            self.execute(sql)
        else:
            sql = "MODIFY %s TO %s WITH PAGE_SIZE=%d" % (
                table_name, ingres_table_structure, ingres_page_size)
            # Removed a leftover traceback.print_stack() debugging call
            # here: 'traceback' was never imported, so this branch
            # raised NameError at runtime.
            self.execute(sql)

IngresConnection.column_factories_by_type['boolean'] = dbapi.IntColumn
IngresConnection.column_factories_by_type['int'] = dbapi.IntColumn
IngresConnection.column_factories_by_type['long'] = dbapi.LongColumn
Property changes on: Products.Ape/trunk/lib/apelib/sql/ingres.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/interfaces.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/interfaces.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/interfaces.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,136 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interfaces for apelib.sql.
+
+$Id$
+"""
+
+from Interface import Interface
+from Interface.Attribute import Attribute
+from apelib.core.interfaces import IColumnSchema
+
+
class IRDBMSConnection (Interface):
    """Interface of basic RDBMS connections.

    This interface provides only relatively basic operations.  Create
    subinterfaces for complex and vendor-specific extensions.
    """

    module = Attribute("module", "The DB-API module")

    connector = Attribute("connector", "The shared DB-API connection")

    def define_table(name, schema):
        """Creates and returns an IRDBMSTable.

        Does not create the table in the database.  table.create()
        creates the table.
        """

    def get_table(name):
        """Returns a previously defined IRDBMSTable."""

    def exists(name, type_name):
        """Returns true if the specified database object exists.

        'name' is the name of the object.  'type_name' is 'table' or
        'sequence'.
        """

    def list_table_names():
        """Returns a list of existing table names.

        Implementations return names without the connection's
        table-name prefix.
        """

    def create_sequence(name, start=1):
        """Creates a sequence."""

    def reset_sequence(name, start=1):
        """Resets a sequence to a starting value."""

    def increment(name):
        """Increments a sequence and returns the value.

        Whether the value is before or after the increment is not specified.
        """

    def clear_table(name):
        """Removes all rows from a table.

        This is not a method of IRDBMSTable because it is not
        always possible to construct an IRDBMSTable while resetting
        tables.
        """
+
+
class ISQLConnection (IRDBMSConnection):
    """An RDBMS connection that also accepts textual SQL statements."""

    def execute(sql, kw=None, fetch=False):
        """Executes a SQL query.

        If kw is provided, parameters in the SQL are substituted for
        parameter values.  If fetch is true, the rows from the results
        are returned.  No type conversion happens in execute().
        """
+
+
class IRDBMSTable (Interface):
    """A table in a database."""

    def select(result_col_names, **filter):
        """Selects rows from a table and returns column values for those rows.

        'filter' maps column names to required values.
        """

    def insert(col_names, row):
        """Inserts one row in the table."""

    def set_one(oid, col_names, row, is_new):
        """Sets one row in the table.

        Executes either an update or insert operation, depending
        on the is_new argument and configured policies.
        """

    def set_many(oid, key_col_names, other_col_names, rows):
        """Sets multiple rows in the table.

        'rows' is a sequence of tuples containing values for the
        key_col_names as well as the other_col_names.

        Either deletes all rows for an oid and inserts new rows, or
        examines the current state of the database and modifies it in
        pieces.
        """

    def delete_rows(**filter):
        """Deletes rows from the table.

        'filter' maps column names to required values.
        """

    def create():
        """Creates the table."""

    def drop():
        """Drops the table."""
+
+
class IRDBMSColumn (IColumnSchema):
    """A column associated with a specific database.

    Converts between generic (application-level) values and
    database-specific values.
    """

    use_conversion = Attribute(
        "use_conversion", "True if this column needs to convert values.")

    def to_db(value):
        """Converts a generic value to a database-specific value."""

    def from_db(value):
        """Converts a database-specific value to a generic value."""
+
Property changes on: Products.Ape/trunk/lib/apelib/sql/interfaces.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/mysql.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/mysql.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/mysql.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,79 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""MySQL-specific database connection.
+
+$Id$
+"""
+
+from apelib.sql.dbapi import AbstractSQLConnection
+
class MySQLConnection (AbstractSQLConnection):
    """SQL connection for MySQL.

    MySQL has no native sequences, so sequences are emulated with a
    one-row table holding a 'last_value' counter.
    """

    column_type_translations = {
        'long': 'bigint',
        'string': 'character varying(255)',
        'blob': 'longblob',
        'boolean': 'tinyint(1)',
        }

    column_name_translations = {
        'oid': 'objoid',
        }

    def exists(self, name, type_name):
        """Returns true if the specified database object exists.

        type_name is 'table' or 'sequence'
        """
        table_name = self.prefix + name
        if type_name not in ('table', 'sequence'):
            raise ValueError(type_name)
        # Sequences are emulated with tables, so both kinds of object
        # are found via SHOW TABLES.
        sql = 'SHOW TABLES LIKE :name'
        rows = self.execute(sql, {'name': table_name}, fetch=1)
        return len(rows)

    def list_table_names(self):
        """Returns a list of existing table names (without the prefix)."""
        sql = 'SHOW TABLES'
        rows = self.execute(sql, {}, fetch=1)
        res = []
        for (name,) in rows:
            if not self.prefix or name.startswith(self.prefix):
                res.append(name[len(self.prefix):])
        return res

    def create_sequence(self, name, start=1):
        """Creates an emulated sequence.

        NOTE(review): the initial row holds 'start' itself, so the
        first increment() returns start + 1, whereas the Ingres
        implementation's first increment returns 'start'.  Confirm
        which behavior is intended before changing it.
        """
        table_name = self.prefix + name
        self.execute("CREATE TABLE %s (last_value int)" % table_name)
        self.execute("INSERT INTO %s VALUES (%d)" % (table_name, start))

    def reset_sequence(self, name, start=1):
        """Resets a sequence so the next increment() returns 'start'.

        Fixed to honor the 'start' argument; the counter was
        previously hard-coded to 0, which is equivalent only for the
        default start=1.
        """
        table_name = self.prefix + name
        self.execute(
            "UPDATE %s SET last_value=%d" % (table_name, start - 1))

    def increment(self, name):
        """Increments a sequence and returns the new value.

        LAST_INSERT_ID(expr) makes the new value readable per
        connection without racing concurrent clients.
        """
        table_name = self.prefix + name
        self.execute(
            "UPDATE %s SET last_value=LAST_INSERT_ID(last_value+1)" %
            table_name)
        rows = self.execute("SELECT LAST_INSERT_ID()", fetch=1)
        return rows[0][0]
+
Property changes on: Products.Ape/trunk/lib/apelib/sql/mysql.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/oidgen.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/oidgen.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/oidgen.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,44 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL OID generator
+
+$Id$
+"""
+
+from apelib.core import interfaces
+
+import sqlbase
+
+
class SQLOIDGenerator (sqlbase.SQLGatewayBase):
    """Allocates OIDs from a SQL sequence."""

    __implements__ = (interfaces.IOIDGenerator,
                      interfaces.IDatabaseInitializer)

    table_name = 'oid_seq'
    root_oid = "0"

    def init(self, event):
        """Creates the OID sequence if missing; resets it on clear_all."""
        conn = self.get_connection(event)
        if conn.exists(self.table_name, 'sequence'):
            if event.clear_all:
                conn.reset_sequence(self.table_name, start=1)
        else:
            conn.create_sequence(self.table_name, start=1)

    def new_oid(self, event):
        """Returns a new OID.  Must return a string."""
        assert interfaces.IGatewayEvent.isImplementedBy(event)
        conn = self.get_connection(event)
        return str(conn.increment(self.table_name))
Property changes on: Products.Ape/trunk/lib/apelib/sql/oidgen.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/postgresql.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/postgresql.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/postgresql.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,80 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""PostgreSQL-specific database connection.
+
+$Id$
+"""
+
+from apelib.sql.dbapi import AbstractSQLConnection
+
+
class PostgreSQLConnection (AbstractSQLConnection):
    """Database connection that knows PostgreSQL specifics."""

    # Generic column type -> PostgreSQL column type.
    column_type_translations = {
        'long':     'bigint',
        'string':   'character varying(255)',
        'blob':     'bytea',
        'datetime': 'timestamp',
        }

    # 'oid' is a PostgreSQL system column name, so rename it.
    column_name_translations = {
        'oid': 'objoid',
        }

    def exists(self, name, type_name):
        """Returns true if the specified database object exists.

        type_name is 'table' or 'sequence'
        """
        if type_name == 'table':
            sql = ('SELECT tablename FROM pg_tables '
                   'WHERE tablename = :name')
        elif type_name == 'sequence':
            sql = ("SELECT relname FROM pg_class "
                   "WHERE relkind = 'S' AND relname = :name")
        else:
            raise ValueError(type_name)
        # PostgreSQL folds unquoted identifiers to lower case.
        full_name = (self.prefix + name).lower()
        rows = self.execute(sql, {'name': full_name}, fetch=1)
        return len(rows)

    def list_table_names(self):
        """Returns a list of existing table names (prefix stripped)."""
        rows = self.execute('SELECT tablename FROM pg_tables', {}, fetch=1)
        prefix_len = len(self.prefix)
        return [name[prefix_len:] for (name,) in rows
                if not self.prefix or name.startswith(self.prefix)]

    def create_sequence(self, name, start=1):
        """Creates a sequence.
        """
        self.execute("CREATE SEQUENCE %s START %d"
                     % (self.prefix + name, start))

    def reset_sequence(self, name, start=1):
        """Resets a sequence to its starting value.
        """
        self.execute("SELECT setval('%s', %d)" % (self.prefix + name, start))

    def increment(self, name):
        """Increments a sequence and returns the new value.
        """
        rows = self.execute(
            "SELECT nextval('%s')" % (self.prefix + name), fetch=1)
        return rows[0][0]
Property changes on: Products.Ape/trunk/lib/apelib/sql/postgresql.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/properties.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/properties.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/properties.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,297 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL properties
+
+$Id$
+"""
+
+from apelib.core.schemas import RowSequenceSchema
+from apelib.core.interfaces import IGateway, IDatabaseInitializer
+from sqlbase import SQLGatewayBase
+
+
+# safe_property_types lists the property types that are safe to store
+# in table columns. Floats are not permitted because their value can
+# change when converting to/from strings. Dates (based on Zope's
+# DateTime class) are not permitted because their precision is not
+# well defined, some databases don't store time zones, and Zope's
+# DateTime class is hard to convert to other date/time types without
+# losing information.
+
# The set of property types that round-trip faithfully through a
# table column (kept as a dict for fast membership tests).
safe_property_types = dict.fromkeys(
    ('string', 'int', 'long', 'text', 'boolean'), 1)
+
+
class SQLProperties (SQLGatewayBase):
    """SQL properties gateway.

    Stores each property as its own (id, type, data) row.
    """

    __implements__ = SQLGatewayBase.__implements__

    schema = RowSequenceSchema()
    schema.add('id', 'string', 1)
    schema.add('type', 'string')
    schema.add('data', 'string')
    table_name = 'properties'
    table_schema = RowSequenceSchema()
    table_schema.add('id', 'string', 1)
    table_schema.add('type', 'string', 0)
    table_schema.add('data', 'blob', 0)

    def load(self, event):
        """Loads all property rows for an object, sorted for hashing."""
        rows = self.get_table(event).select(self.column_names, oid=event.oid)
        rows.sort()
        return rows, tuple(rows)

    def store(self, event, state):
        """Writes property rows; returns the sorted state as the hash."""
        rows = [(prop_id, prop_type, data)
                for prop_id, prop_type, data in state]
        self.get_table(event).set_many(
            event.oid, ('id',), ('type', 'data'), rows)
        sorted_state = list(state)
        sorted_state.sort()
        return tuple(sorted_state)
+
+
class SQLFixedProperties (SQLGatewayBase):
    """SQL fixed-schema properties gateway.

    Stores one row per object, with one table column per property.
    """

    def __init__(self, conn_name, table_name, schema):
        self.table_name = table_name
        self.schema = schema
        SQLGatewayBase.__init__(self, conn_name)
        self.columns = schema.get_columns()

    def init(self, event):
        """Defines the property table and creates it if missing."""
        conn = self.get_connection(event)
        full_schema = RowSequenceSchema(
            self.oid_columns + self.table_schema.get_columns())
        table = conn.define_table(self.table_name, full_schema)
        if not conn.exists(self.table_name, 'table'):
            table.create()

    def load(self, event):
        """Loads the single property row for an object.

        Raises ValueError if more than one row matches the OID.
        """
        recs = self.get_table(event).select(
            self.column_names, oid=event.oid)
        if not recs:
            return (), ()
        if len(recs) > 1:
            raise ValueError("Multiple records where only one expected")
        record = [str(value) for value in recs[0]]
        items = []
        for col, value in zip(self.columns, record):
            prop_name = col.name
            # A leading underscore marks a column that was renamed to
            # avoid a reserved name; strip it for the property name.
            if prop_name.startswith('_'):
                prop_name = prop_name[1:]
            items.append((prop_name, col.type, value))
        return items, tuple(record)

    def store(self, event, state, leftover=None):
        """Stores properties as fixed columns.

        Properties missing from the schema are passed back through
        'leftover' (a dict) when given; otherwise they raise
        ValueError.
        """
        statedict = {}  # prop name -> (type, value)
        for name, typ, value in state:
            statedict[name] = (typ, value)
        record = []
        for col in self.columns:
            prop_name = col.name
            if prop_name.startswith('_'):
                prop_name = prop_name[1:]
            info = statedict.pop(prop_name, None)
            if info is None:
                record.append(None)  # Hopefully this translates to null.
            else:
                record.append(str(info[1]))
        if statedict:
            if leftover is not None:
                # Pass back a dictionary of properties not stored yet.
                leftover.update(statedict)
            else:
                raise ValueError(
                    "Extra properties provided for fixed schema: %s"
                    % statedict.keys())
        table = self.get_table(event)
        table.set_one(event.oid, self.column_names, record, event.is_new)
        return tuple(record)
+
+
+
class SQLMultiTableProperties (SQLGatewayBase):
    """Combines fixed and variable properties.

    Properties whose types are listed in safe_property_types are
    stored in a per-class fixed-schema table; all other properties
    fall back to the generic SQLProperties table.
    """

    __implements__ = IGateway, IDatabaseInitializer

    schema = SQLProperties.schema

    # Registry table mapping class name -> fixed property table name.
    table_name = 'property_tables'
    table_schema = RowSequenceSchema()
    table_schema.add('class_name', 'string', 1)
    table_schema.add('table_name', 'string', 0)
    oid_columns = []  # No OID column

    def __init__(self, conn_name='db'):
        # Variable (row-per-property) storage shares the connection.
        self.var_props = SQLProperties(conn_name=conn_name)
        self.fixed_props = {}  # class name -> SQLFixedProperties instance
        SQLGatewayBase.__init__(self, conn_name)

    def get_sources(self, event):
        # No external sources to poll for invalidation.
        return None

    def init(self, event):
        """Creates the registry table and initializes variable storage.

        On clear_all, also empties every registered fixed property
        table and drops the in-memory cache.
        """
        conn = self.get_connection(event)
        table = conn.define_table(self.table_name, self.table_schema)
        if not conn.exists(self.table_name, 'table'):
            table.create()
        self.var_props.init(event)
        if event.clear_all:
            # Clear the fixed property tables.
            recs = table.select(('table_name',))
            for (name,) in recs:
                conn.clear_table(name)
            self.fixed_props = {}


    def get_schema_for_class(self, module_name, class_name):
        """Returns the class-defined property schema.

        This Zope2-ism should be made pluggable later on.
        """
        d = {}
        m = __import__(module_name, d, d, ('__doc__',))
        klass = getattr(m, class_name)
        schema = RowSequenceSchema()
        props = getattr(klass, '_properties', ())
        if not props:
            return None
        for p in props:
            if not safe_property_types.has_key(p['type']):
                # Don't store this property in its own column.
                # It is of a type that's hard to convert faithfully.
                continue
            prop_name = p['id']
            if prop_name == 'oid':
                # 'oid' is reserved for the object ID column.
                name = '_oid'
            else:
                name = prop_name
            schema.add(name, p['type'], 0)
        return schema


    def get_fixed_props(self, event):
        """Returns a SQLFixedProperties instance or None.

        Caches per class name; allocates a table name and creates the
        table on first use of a class.
        """
        classification = event.classification
        if classification is None:
            return None
        cn = classification.get('class_name')
        if cn is None:
            return None
        if self.fixed_props.has_key(cn):
            return self.fixed_props[cn] # May be None

        # Gather info about the class
        pos = cn.rfind('.')
        if pos < 0:
            raise ValueError, "Not a qualified class name: %s" % repr(cn)
        module_name = cn[:pos]
        class_name = cn[pos + 1:]
        schema = self.get_schema_for_class(module_name, class_name)
        if schema is None or not schema.get_columns():
            # No fixed properties exist for this class.
            self.fixed_props[cn] = None
            return None

        # Allocate a table name
        conn = self.get_connection(event)
        table = self.get_table(event)
        rows = table.select(('table_name',), class_name=cn)
        if rows:
            table_name = rows[0][0]
        else:
            attempt = 0
            while 1:
                # Find an available table name.
                table_name = '%s_properties' % (class_name[:16])
                if attempt:
                    table_name += '_%02d' % attempt
                if not conn.exists(table_name, 'table'):
                    break
                attempt += 1
            table.insert(('class_name', 'table_name'), (cn, table_name))

        # Create the fixed properties and table
        fp = SQLFixedProperties(self.conn_name, table_name, schema)
        fp.init(event)
        # XXX If the transaction gets aborted, the table creation will
        # be undone, but self.fixed_props won't see the change.
        # Perhaps we need to reset self.fixed_props on abort.
        self.fixed_props[cn] = fp
        return fp


    def load(self, event):
        """Returns a combination of states from two tables."""
        var_state, var_hash = self.var_props.load(event)
        fp = self.get_fixed_props(event)
        if fp is None:
            return var_state, var_hash
        fixed_state, fixed_hash = fp.load(event)
        # Merge fixed_state and var_state, letting fixed_state
        # override var_state except when the value in fixed_state is
        # None.
        res = []
        placement = {}  # property name -> placement in results
        for rec in fixed_state:
            placement[rec[0]] = len(res)
            res.append(rec)
        for rec in var_state:
            index = placement.get(rec[0])
            if index is None:
                res.append(rec)
            elif res[index][2] is None:
                # override the fixed value, since it was None.
                res[index] = rec
        return res, (fixed_hash, var_hash)


    def store(self, event, state):
        """Stores state in two tables."""
        fp = self.get_fixed_props(event)
        if fp is None:
            return self.var_props.store(event, state)
        # Store the fixed state first and find out what got left over.
        leftover = {}
        state = list(state)
        state.sort()
        fixed_hash = fp.store(event, state, leftover=leftover)
        if leftover:
            # Anything the fixed schema couldn't hold goes to the
            # variable property table.
            var_state = []
            for prop_name, (typ, value) in leftover.items():
                var_state.append((prop_name, typ, value))
            var_hash = self.var_props.store(event, var_state)
        else:
            var_hash = ()
        return (fixed_hash, var_hash)
Property changes on: Products.Ape/trunk/lib/apelib/sql/properties.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/security.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/security.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/security.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,138 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL gateways for security information.
+
+$Id$
+"""
+
+from apelib.core.schemas import RowSequenceSchema, ColumnSchema
+from sqlbase import SQLGatewayBase
+
+
class SQLSecurityAttributes (SQLGatewayBase):
    """SQL security attribute storage."""

    __implements__ = SQLGatewayBase.__implements__

    schema = RowSequenceSchema()
    schema.add('declaration_type', 'string')
    schema.add('role', 'string')
    schema.add('permission', 'string')
    schema.add('username', 'string')

    table_name = 'security'
    # Many declarations per object, so don't create a primary key.
    oid_columns = [ColumnSchema('oid', 'int', 0)]

    def load(self, event):
        """Loads security declarations, sorted for stable hashing."""
        rows = self.get_table(event).select(self.column_names, oid=event.oid)
        rows.sort()
        return rows, tuple(rows)

    def store(self, event, state):
        """Replaces the security declarations for one object."""
        self.get_table(event).set_many(
            event.oid, (), self.column_names, state)
        declarations = list(state)
        declarations.sort()
        return tuple(declarations)
+
+
+
class SQLUserList (SQLGatewayBase):
    """Stores and retrieves all users for a folder at once.

    User records span three tables: one row per user in 'users',
    plus one row per role in 'user_roles' and one row per domain in
    'user_domains'.
    """

    __implements__ = SQLGatewayBase.__implements__

    schema = RowSequenceSchema()
    schema.add('id', 'string', 1)
    schema.add('password', 'string')
    schema.add('roles', 'string:list')
    schema.add('domains', 'string:list')

    # table name -> list of (column name, type, unique) add() args.
    table_defs = {
        'users':        [('oid', 'int', 1),
                         ('id', 'string', 1),
                         ('password', 'string', 0)],
        'user_roles':   [('oid', 'int', 0),
                         ('id', 'string', 0),
                         ('role', 'string', 0)],
        'user_domains': [('oid', 'int', 0),
                         ('id', 'string', 0),
                         ('domain', 'string', 0)],
        }


    def init(self, event):
        """Defines the three user tables, creating or clearing them."""
        conn = self.get_connection(event)
        for table_name, columns in self.table_defs.items():
            table_schema = RowSequenceSchema()
            for args in columns:
                table_schema.add(*args)
            table = conn.define_table(table_name, table_schema)
            if not conn.exists(table_name, 'table'):
                table.create()
            elif event.clear_all:
                table.delete_rows()


    def load(self, event):
        """Loads all user records for a folder.

        Returns a sorted list of (id, password, roles, domains)
        tuples plus a hash value.  Role and domain rows without a
        matching user row are ignored.
        """
        conn = self.get_connection(event)
        rows = conn.get_table('users').select(
            ('id', 'password'), oid=event.oid)
        data = {}
        for id, password in rows:
            data[id] = (password, [], [])
        rows = conn.get_table('user_roles').select(
            ('id', 'role'), oid=event.oid)
        for id, role in rows:
            row = data.get(id)
            if row is not None:
                row[1].append(role)
        rows = conn.get_table('user_domains').select(
            ('id', 'domain'), oid=event.oid)
        for id, domain in rows:
            row = data.get(id)
            if row is not None:
                row[2].append(domain)
        records = []
        for id, (password, roles, domains) in data.items():
            # Sort roles and domains for a deterministic hash value.
            roles = list(roles)
            roles.sort()
            domains = list(domains)
            domains.sort()
            records.append((id, password, tuple(roles), tuple(domains)))
        records.sort()
        return records, tuple(records)


    def store(self, event, state):
        """Stores all user records for a folder.

        Duplicate role and domain entries are collapsed before being
        written.
        """
        conn = self.get_connection(event)
        rows = [(id, pw) for id, pw, roles, domains in state]
        conn.get_table('users').set_many(
            event.oid, (), ('id', 'password',), rows)
        roles_d = {}
        domains_d = {}
        for id, pw, roles, domains in state:
            for role in roles:
                roles_d[(id, role)] = 1
            for domain in domains:
                domains_d[(id, domain)] = 1
        conn.get_table('user_roles').set_many(
            event.oid, (), ('id', 'role',), roles_d.keys())
        conn.get_table('user_domains').set_many(
            event.oid, (), ('id', 'domain',), domains_d.keys())
        state = list(state)
        state.sort()
        return tuple(state)
Property changes on: Products.Ape/trunk/lib/apelib/sql/security.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/sqlbase.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/sqlbase.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/sqlbase.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,74 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Abstract gateways
+
+$Id$
+"""
+
+from apelib.core.interfaces \
+ import IGateway, IDatabaseInitializer, IDatabaseInitEvent
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+from interfaces import IRDBMSConnection
+
+
+class SQLGatewayBase:
+ """SQL gateway base class"""
+
+ __implements__ = IGateway, IDatabaseInitializer
+
+ # override these in subclasses
+ table_name = None
+ schema = None
+ table_schema = None
+ oid_columns = [ColumnSchema('oid', 'int', 1)]
+
+ def __init__(self, conn_name='db'):
+ self.conn_name = conn_name
+ if self.table_schema is None:
+ if self.schema is not None:
+ self.table_schema = self.schema
+ else:
+ self.table_schema = RowSequenceSchema()
+ self.column_names = [f.name for f in self.table_schema.get_columns()]
+
+ def get_connection(self, event):
+ return event.connections[self.conn_name]
+
+ def get_table(self, event):
+ c = event.connections[self.conn_name]
+ return c.get_table(self.table_name)
+
+ def create(self, event):
+ self.get_table(event).create()
+
+ def init(self, event):
+ conn = self.get_connection(event)
+ assert IRDBMSConnection.isImplementedBy(conn)
+ all = RowSequenceSchema(
+ self.oid_columns + self.table_schema.get_columns())
+ table = conn.define_table(self.table_name, all)
+ if conn.exists(self.table_name, 'table'):
+ if IDatabaseInitEvent.isImplementedBy(event) and event.clear_all:
+ table.delete_rows()
+ else:
+ table.create()
+
+ def load(self, event):
+ raise NotImplementedError, "abstract method"
+
+ def store(self, event, obj):
+ raise NotImplementedError, "abstract method"
+
+ def get_sources(self, event):
+ return None
Property changes on: Products.Ape/trunk/lib/apelib/sql/sqlbase.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/structure.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/structure.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/structure.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,165 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL gateways for a filesystem-like tree structure.
+
+$Id$
+"""
+
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+from sqlbase import SQLGatewayBase
+
+
class SQLObjectData (SQLGatewayBase):
    """SQL object data gateway.

    Stores one opaque data string per object in a blob column.
    """

    __implements__ = SQLGatewayBase.__implements__

    schema = ColumnSchema('data', 'string')
    table_name = 'object_data'
    table_schema = RowSequenceSchema()
    table_schema.add('data', 'blob', 0)

    def load(self, event):
        """Loads the data string for an object.

        Returns an empty string when no row exists yet.
        """
        table = self.get_table(event)
        firstcol = self.column_names[:1]
        items = table.select(firstcol, oid=event.oid)
        if items:
            # Coerce the database's blob value to a plain string.
            state = str(items[0][0])
        else:
            state = ''
        return state, state

    def store(self, event, state):
        """Stores the data string for an object."""
        table = self.get_table(event)
        firstcol = self.column_names[:1]
        table.set_one(event.oid, firstcol, (state,), event.is_new)
        return state
+
+
class SQLFolderItems (SQLGatewayBase):
    """SQL folder items gateway."""

    __implements__ = SQLGatewayBase.__implements__

    schema = RowSequenceSchema()
    schema.add('key', 'string', 1)
    schema.add('oid', 'string')
    schema.add('classification', 'classification')
    table_name = 'folder_items'
    table_schema = RowSequenceSchema()
    table_schema.add('name', 'string', 1)
    table_schema.add('child_oid', 'int', 0)

    def load(self, event):
        """Loads the name -> child mapping, classifying each child."""
        rows = self.get_table(event).select(
            self.column_names, oid=event.oid)
        state = []
        hash_items = []
        for name, child_oid in rows:
            oid_str = str(child_oid)
            state.append((name, oid_str, event.classify(oid_str)))
            hash_items.append((name, long(child_oid)))
        hash_items.sort()
        return state, tuple(hash_items)

    def store(self, event, state):
        """Stores the name -> child mapping for a folder."""
        rows = [(name, long(child_oid)) for (name, child_oid, cls) in state]
        rows.sort()
        # Note that set_many() requires the child_oid column to match
        # its database type.
        self.get_table(event).set_many(
            event.oid, ('name',), ('child_oid',), rows)
        return tuple(rows)
+
+
class SQLItemId (SQLGatewayBase):
    """SQL item ID gateway.

    Piggybacks SQLFolderItems for init and store.
    Makes the assumption that the item is stored in only one place.
    """

    __implements__ = SQLGatewayBase.__implements__

    schema = ColumnSchema('id', 'string')
    table_name = 'folder_items'
    table_schema = RowSequenceSchema()
    table_schema.add('child_oid', 'int', 1)
    table_schema.add('name', 'string', 0)

    def init(self, event):
        """Does nothing; SQLFolderItems manages the table."""
        pass

    def load(self, event):
        """Looks up the name under which an object is filed."""
        rows = self.get_table(event).select(('name',), child_oid=event.oid)
        name = None
        if rows:
            name = rows[0][0]  # Accept only the first result
        # Disable conflict checking by returning None as the hash value.
        return name, None

    def store(self, event, state):
        """Does nothing; SQLFolderItems writes the rows."""
        return None
+
+
class SQLRemainder (SQLObjectData):
    """SQL remainder pickle gateway.

    Same behavior as SQLObjectData, but stores the data in its own
    'remainder' table with a 'pickle' blob column.
    """

    __implements__ = SQLGatewayBase.__implements__

    table_name = 'remainder'
    table_schema = RowSequenceSchema()
    table_schema.add('pickle', 'blob', 0)
+
+
+class SQLModTime (SQLGatewayBase):
+ """SQL object mod time gateway"""
+
+ __implements__ = SQLGatewayBase.__implements__
+
+ schema = ColumnSchema('mtime', 'int') # second
+ table_name = 'mtime'
+ table_schema = RowSequenceSchema()
+ table_schema.add('mtime', 'long', 0)
+
+ def load(self, event):
+ table = self.get_table(event)
+ items = table.select(self.column_names, oid=event.oid)
+ if items:
+ state = long(items[0][0])
+ else:
+ state = 0L
+ return state, state
+
+ def store(self, event, state):
+ state = long(state)
+ table = self.get_table(event)
+ data = (state,)
+ table.set_one(event.oid, self.column_names, data, event.is_new)
+ return state
+
+
def root_mapping():
    """Returns a gateway suitable for storing the root persistent mapping.
    """
    from apelib.core.gateways import CompositeGateway
    gateway = CompositeGateway()
    # OID references go to the folder items table; everything else
    # lands in the object data table.
    gateway.add('references', SQLFolderItems())
    gateway.add('others', SQLObjectData())
    return gateway
Property changes on: Products.Ape/trunk/lib/apelib/sql/structure.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/sql/table.py
===================================================================
--- Products.Ape/trunk/lib/apelib/sql/table.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/sql/table.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,226 @@
+##############################################################################
+#
+# Copyright (c) 2004 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""SQL implementation of IRDBMSTable.
+
+$Id$
+"""
+
+from apelib.sql.interfaces import IRDBMSTable, IRDBMSColumn
+
+
class SQLTable:
    """Talks to a table via SQL."""

    __implements__ = IRDBMSTable

    def __init__(self, connection, name):
        # name: the full (already prefixed) database table name.
        self.name = name
        # Borrow the connection's execute method directly.
        self.execute = connection.execute
        self.columns = {}  # logical column name -> IRDBMSColumn
        self.column_order = []  # logical names in declaration order

    def add_column(self, name, column):
        """Registers a column.  Declaration order is preserved."""
        assert not self.columns.has_key(name)
        assert IRDBMSColumn.isImplementedBy(column)
        self.columns[name] = column
        self.column_order.append(name)

    def cache(self, m, *params):
        """Calls m(*params) to generate SQL; a caching placeholder."""
        # In the future, this will integrate with AbstractSQLConnection
        # to provide a clean way to cache and prepare database queries.
        return m(*params)

    def generate_conditions(self, col_names):
        """Generates the body of a WHERE clause for the given columns."""
        clauses = [
            "%s = :%s" % (self.columns[c].name, c) for c in col_names]
        return ' AND '.join(clauses)

    def generate_select(self, filter_col_names, result_col_names):
        """Generates a SELECT statement, filtered by filter_col_names."""
        result_names = [self.columns[col].name for col in result_col_names]
        sql = 'SELECT %s FROM %s' % (', '.join(result_names), self.name)
        where = self.generate_conditions(filter_col_names)
        if where:
            sql += ' WHERE %s' % where
        return sql

    def generate_insert(self, col_names):
        """Generates an INSERT statement with named value placeholders."""
        db_names = [self.columns[c].name for c in col_names]
        colfmts = [':%s' % c for c in col_names]
        return 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.name, ', '.join(db_names), ', '.join(colfmts))

    def generate_update(self, key_col_names, other_col_names):
        """Generates an UPDATE statement keyed on key_col_names."""
        where = self.generate_conditions(key_col_names)
        to_set = [
            ("%s = :%s" % (self.columns[c].name, c))
            for c in other_col_names]
        return 'UPDATE %s SET %s WHERE %s' % (
            self.name, ', '.join(to_set), where)

    def generate_delete(self, col_names):
        """Generates a DELETE statement, filtered by col_names."""
        where = self.generate_conditions(col_names)
        sql = 'DELETE FROM %s' % self.name
        if where:
            sql += ' WHERE %s' % where
        return sql

    def prepare_for_db(self, col_names, data, oid=None):
        """Prepares one row for writing to the database.

        Returns a dict mapping column name to database-ready value,
        converting each value via the column's to_db().
        """
        res = {}
        for n in range(len(col_names)):
            c = col_names[n]
            res[c] = self.columns[c].to_db(data[n])
        if oid is not None:
            res['oid'] = self.columns['oid'].to_db(oid)
        return res

    #
    # IRDBMSTable implementation.
    #

    def select(self, result_col_names, **filter):
        """Selects rows from a table and returns column values for those rows.
        """
        # Convert the filter values to database form.
        f = {}
        for col_name, value in filter.items():
            f[col_name] = self.columns[col_name].to_db(value)
        sql = self.cache(self.generate_select, filter.keys(), result_col_names)
        db_res = self.execute(sql, f, fetch=1)
        # Convert the results to standard types.
        conversions = []
        for n in range(len(result_col_names)):
            col = self.columns[result_col_names[n]]
            if col.use_conversion:
                conversions.append((n, col.from_db))
        if conversions:
            # Convert specific columns.
            res = []
            for row in db_res:
                r = list(row)
                for n, from_db in conversions:
                    r[n] = from_db(r[n])
                res.append(tuple(r))
        else:
            # No conversion needed.
            res = db_res
        return res

    def insert(self, col_names, row):
        """Inserts one row in the table.
        """
        kw = self.prepare_for_db(col_names, row)
        sql = self.cache(self.generate_insert, col_names)
        self.execute(sql, kw)

    def set_one(self, oid, col_names, row, is_new):
        """Sets one row in a table.

        Requires the table to have only one value for each oid.
        Executes either an update or insert operation, depending on
        the is_new argument and configured policies.
        """
        kw = self.prepare_for_db(col_names, row, oid)
        if is_new:
            sql = self.cache(self.generate_insert, ('oid',) + tuple(col_names))
            self.execute(sql, kw)
        else:
            sql = self.cache(self.generate_update, ('oid',), col_names)
            self.execute(sql, kw)

    def set_many(self, oid, key_col_names, other_col_names, rows):
        """Sets multiple rows in a table.

        'rows' is a sequence of tuples containing values for the
        key_columns as well as the other_columns.

        Either deletes all rows for an oid and inserts new rows, or
        examines the current state of the database and modifies it in
        pieces.
        """
        combo = tuple(key_col_names) + tuple(other_col_names)
        if not key_col_names:
            # Don't compare rows.  Just delete and insert.
            kw = self.prepare_for_db((), (), oid)
            sql = self.cache(self.generate_delete, ('oid',))
            self.execute(sql, kw)
            sql = self.cache(self.generate_insert, ('oid',) + combo)
            for row in rows:
                kw = self.prepare_for_db(combo, row, oid)
                self.execute(sql, kw)
            return
        # Edit the table.
        # Build key -> value mappings of the current and desired rows.
        exist_rows = self.select(combo, oid=oid)
        count = len(key_col_names)
        existing = {}
        for record in exist_rows:
            key = tuple(record[:count])
            value = tuple(record[count:])
            existing[key] = value
        now = {}
        for record in rows:
            key = tuple(record[:count])
            value = tuple(record[count:])
            now[key] = value
        # Delete and update rows.
        for key, value in existing.items():
            if not now.has_key(key):
                # Delete this row.
                kw = self.prepare_for_db(key_col_names, key, oid)
                sql = self.cache(
                    self.generate_delete, ('oid',) + tuple(key_col_names))
                self.execute(sql, kw)
            elif now[key] != value:
                # Update this row.
                #print 'DIFFERENT:', now[key], value
                kw = self.prepare_for_db(combo, key + now[key], oid)
                cols = ('oid',) + tuple(key_col_names)
                sql = self.cache(self.generate_update, cols, other_col_names)
                self.execute(sql, kw)
        for key, value in now.items():
            if not existing.has_key(key):
                # Insert this row.
                kw = self.prepare_for_db(combo, key + value, oid)
                sql = self.cache(self.generate_insert, ('oid',) + combo)
                self.execute(sql, kw)
        return

    def delete_rows(self, **filter):
        """Deletes rows from the table.
        """
        # NOTE(review): unlike select(), the filter values here are not
        # passed through each column's to_db() -- confirm this is
        # intentional for the callers that use it.
        sql = self.cache(self.generate_delete, filter.keys())
        self.execute(sql, filter)

    def create(self):
        """Creates the table.
        """
        pkeys = []
        col_decls = []
        for c in self.column_order:
            col = self.columns[c]
            constraints = ''
            # Unique columns become NOT NULL and join the primary key.
            if col.unique:
                constraints = ' NOT NULL'
                pkeys.append(col.name)
            col_decls.append(
                "%s %s%s" % (col.name, col.type, constraints))
        if pkeys:
            col_decls.append('PRIMARY KEY (%s)' % ', '.join(pkeys))
        sql = "CREATE TABLE %s (%s)" % (self.name, ', '.join(col_decls))
        self.execute(sql)

    def drop(self):
        """Drops the table.
        """
        sql = "DROP TABLE %s" % self.name
        self.execute(sql)
Property changes on: Products.Ape/trunk/lib/apelib/sql/table.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,18 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""apelib tests package
+
+$Id$
+"""
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/correct.png
===================================================================
(Binary files differ)
Property changes on: Products.Ape/trunk/lib/apelib/tests/correct.png
___________________________________________________________________
Name: svn:mime-type
+ image/png
Added: Products.Ape/trunk/lib/apelib/tests/serialtestbase.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/serialtestbase.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/serialtestbase.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,69 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Serialization test setup/teardown
+
+$Id$
+"""
+
+import ZODB
+from Persistence import PersistentMapping
+from cPickle import dumps, loads
+
+from apelib.core import classifiers, gateways
+from apelib.core import mapper, oidgen, schemas, serializers
+
+from apelib.zodb3.serializers import PersistentMappingSerializer
+from apelib.zodb3.serializers import RemainingState, RollCall
+
+
class TestObject(PersistentMapping):
    # A persistent mapping with one extra string attribute, so tests
    # can exercise both item state and attribute state.
    strdata = ""
+
+
def add_mapper(conf, klass, mapper_name):
    """Adds a simple mapper to the configuration.
    """
    s = serializers.CompositeSerializer()
    g = gateways.RAMGateway(s.schema)
    dotted_name = '%s.%s' % (klass.__module__, klass.__name__)
    new_mapper = mapper.Mapper(mapper_name, dotted_name, s, g)
    conf.mappers[mapper_name] = new_mapper
    conf.classifier.add_store_rule(dotted_name, mapper_name)
    return new_mapper
+
+
class SerialTestBase:
    """Shared fixture: a mapper configuration with two simple mappers.

    "pm" maps plain PersistentMappings; "tm" maps TestObject and also
    captures any leftover attribute state.
    """

    def setUp(self):
        # Classification data lives in a RAM gateway with a
        # single-column schema.
        schema = schemas.ColumnSchema("classification", "classification")
        cfr = classifiers.SimpleClassifier(gateways.RAMGateway(schema))
        oid_gen = oidgen.SerialOIDGenerator()
        self.conf = mapper.MapperConfiguration({}, cfr, oid_gen)

        m = add_mapper(self.conf, PersistentMapping, "pm")
        m.serializer.add("items", PersistentMappingSerializer())
        # RollCall makes serialization fail if any attribute goes
        # unaccounted for (see the extra-attribute test).
        m.serializer.add("rollcall", RollCall())
        # Keep the gateway's schema in sync with the composite serializer.
        m.gateway.schema = m.serializer.schema

        m = add_mapper(self.conf, TestObject, "tm")
        m.serializer.add("items", PersistentMappingSerializer())
        # RemainingState absorbs attributes (e.g. strdata) not handled
        # by the other serializers.
        m.serializer.add("remainder", RemainingState())
        m.gateway.schema = m.serializer.schema

        self.conf.check()
        self.conns = {}  # RAM gateways need no real connections

    def tearDown(self):
        pass
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/serialtestbase.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testall.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testall.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testall.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,71 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Run all unit tests
+
+To run all tests, invoke this script with the PYTHONPATH environment
+variable set. Example:
+
+PYTHONPATH=~/cvs/Zope/lib/python python testall.py
+
+$Id$
+"""
+
+import sys, unittest
+
+try:
+ import apelib
+except ImportError:
+ # The Ape product makes apelib available as a top-level package.
+ import ZODB
+ import Products.Ape
+ import apelib
+
+from testserialization import SerializationTests
+from testimpl import ApelibImplTests
+from teststorage import ApeStorageTests
+from testio import ApeIOTests
+from testzope2fs import Zope2FSTests, Zope2FSUnderscoreTests
+from testparams import ParamsTests
+from testsqlimpl import ApelibSQLImplTests
+from testzodbtables import ZODBTableTests, ZODBTableTestsWithoutPrimaryKey
+from testscanner import ScanControlTests, ScannerTests
+from testzope2sql import PsycopgTests, MySQLTests
+import testzope2sql
+
+
+sql_suite = testzope2sql.test_suite()
+
def test_suite():
    """Assemble the complete Ape test suite, SQL tests included."""
    all_tests = unittest.TestSuite()
    test_classes = [
        SerializationTests,
        ZODBTableTests,
        ZODBTableTestsWithoutPrimaryKey,
        ApelibImplTests,
        ApeStorageTests,
        ApeIOTests,
        Zope2FSTests,
        Zope2FSUnderscoreTests,
        ParamsTests,
        ApelibSQLImplTests,
        ScanControlTests,
        ScannerTests,
    ]
    for klass in test_classes:
        all_tests.addTest(unittest.makeSuite(klass, 'test'))
    all_tests.addTest(sql_suite)
    return all_tests
+
+if __name__ == '__main__':
+ unittest.main(defaultTest='test_suite')
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testall.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testimpl.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testimpl.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testimpl.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,91 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interface implementation tests
+
+$Id$
+"""
+
+import os
+import unittest
+from types import ListType, TupleType
+
+from Interface import Interface
+from Interface.IInterface import IInterface
+from Interface.Verify import verifyClass
+
+
class InterfaceImplChecker:
    """Mixin that verifies __implements__ declarations in packages."""

    def _test_object_imp(self, c):
        # Read the declaration *before* the try block: previously, if
        # accessing __implements__ raised, ``impl`` was unbound and the
        # except clause died with a NameError instead of reporting.
        impl = c.__implements__
        try:
            self._verify(impl, c)
        except:
            # Parenthesized form works as a statement in Python 2 and a
            # call in Python 3.
            print('%s incorrectly implements %s' % (repr(c), repr(impl)))
            raise

    def _test_all_in_module(self, m):
        # Check every object that declares __implements__, skipping
        # interfaces themselves and names merely imported from elsewhere.
        name = m.__name__
        for attr, value in m.__dict__.items():
            if (hasattr(value, '__implements__') and
                not IInterface.isImplementedBy(value)
                and getattr(value, '__module__', None) == name):
                self._test_object_imp(value)

    def _test_all_in_package(self, p):
        # Import each module of the package exactly once and check it.
        seen = {'__init__': 1}
        for path in p.__path__:
            names = os.listdir(path)
            for name in names:
                base, ext = os.path.splitext(name)
                ext = ext.lower()
                if ext in ('.py', '.pyc', '.pyo'):
                    if seen.has_key(base):
                        continue
                    seen[base] = 1
                    modname = '%s.%s' % (p.__name__, base)
                    # fromlist forces __import__ to return the module
                    # itself rather than the top-level package.
                    m = __import__(modname, {}, {}, ('__doc__',))
                    self._test_all_in_module(m)

    def _verify(self, iface, c):
        # Recursively verify c against an interface, including all of
        # the interface's bases; lists/tuples of interfaces are allowed.
        if isinstance(iface, ListType) or isinstance(iface, TupleType):
            for item in iface:
                self._verify(item, c)
        else:
            verifyClass(iface, c)
            for base in iface.getBases():
                self._verify(base, c)
+
+
class ApelibImplTests(InterfaceImplChecker, unittest.TestCase):
    """Verify __implements__ declarations across the apelib packages."""

    def test_core_implementations(self):
        import apelib.core
        self._test_all_in_package(apelib.core)

    def test_zope2_implementations(self):
        import apelib.zope2
        self._test_all_in_package(apelib.zope2)

    def test_fs_implementations(self):
        import apelib.fs
        self._test_all_in_package(apelib.fs)

    def test_zodb3_implementations(self):
        import apelib.zodb3
        self._test_all_in_package(apelib.zodb3)
+
+if __name__ == '__main__':
+ unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testimpl.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testio.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testio.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testio.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,124 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Tests of apelib.core.io
+
+$Id$
+"""
+
+import unittest
+
+import ZODB
+from Persistence import PersistentMapping
+
+from apelib.core import io
+from apelib.core.interfaces import IObjectDatabase
+from serialtestbase import SerialTestBase, TestObject
+
+
class TestObjectDatabase:
    """Minimal IObjectDatabase stub; only get_class() is functional."""
    __implements__ = IObjectDatabase

    def get(self, oid, hints=None):
        raise NotImplementedError

    def identify(self, obj):
        raise NotImplementedError

    def new_oid(self):
        raise NotImplementedError

    def get_class(self, module, name):
        """Return the object ``name`` defined in ``module``.

        Pass a fromlist so __import__ returns the named (possibly
        dotted) module itself; the bare form returns the top-level
        package, so getattr() would fail for dotted module names.
        """
        m = __import__(module, {}, {}, ('__doc__',))
        return getattr(m, name)
+
+
+
class ApeIOTests(SerialTestBase, unittest.TestCase):
    """Round-trip tests of apelib.core.io at the object-system level."""

    def get_object_database(self):
        # Factory hook so subclasses can substitute a richer database.
        return TestObjectDatabase()

    def test_impl(self):
        # Test of test :-)
        from Interface.Verify import verifyClass
        verifyClass(IObjectDatabase, TestObjectDatabase)

    def test_serialize_and_deserialize(self):
        # Serialize, then deserialize into a fresh instance; both the
        # attribute state and the mapping state must survive.
        ob = TestObject()
        ob.strdata = '345'
        ob['a'] = 'b'
        ob['c'] = 'd'
        oid = 'test'
        obj_db = self.get_object_database()
        obsys = io.ObjectSystemIO(self.conf, obj_db)
        event, classification, state = obsys.serialize(oid, ob)

        ob2 = obsys.new_instance(oid, classification)
        obsys.deserialize(oid, ob2, classification, state)
        self.assertEqual(ob.strdata, ob2.strdata)
        self.assertEqual(ob.data, ob2.data)


    def test_store_and_load(self):
        # Tests both serialization and storage
        ob = TestObject()
        ob.strdata = '345'
        ob['a'] = 'b'
        ob['c'] = 'd'
        oid = 'test'
        obj_db = self.get_object_database()
        obsys = io.ObjectSystemIO(self.conf, obj_db)
        gwsys = io.GatewayIO(self.conf, self.conns)
        event, classification, state = obsys.serialize(oid, ob)
        gwsys.store(oid, classification, state, True)

        event, classification, state, hash_value = gwsys.load(oid)
        ob2 = obsys.new_instance(oid, classification)
        obsys.deserialize(oid, ob2, classification, state)
        self.assertEqual(ob.strdata, ob2.strdata)
        self.assertEqual(ob.data, ob2.data)


    def test_export_import(self):
        # Export an object tree, re-import it, and verify the copy is
        # deep (all new objects) yet equal in state.
        root = PersistentMapping()

        test1 = TestObject()
        test1.strdata = '345'
        test1['a'] = 'b'
        test1['c'] = 'd'
        root['TestRoot'] = test1
        test2 = TestObject()
        test2.leftover = 'oops'
        test2['true'] = 'undecided'
        root['TestRoot2'] = test2

        oid = ''
        exporter = io.ExportImport(self.conf, self.conns)
        exporter.export_object(root, oid)

        importer = io.ExportImport(self.conf, self.conns)
        roota = importer.import_object(oid)
        self.assert_(root is not roota)
        self.assert_(root['TestRoot'] is not roota['TestRoot'])
        self.assert_(root['TestRoot2'] is not roota['TestRoot2'])
        self.assertEqual(root['TestRoot'].data, roota['TestRoot'].data)
        self.assertEqual(root['TestRoot2'].data, roota['TestRoot2'].data)
        self.assertEqual(root['TestRoot'].strdata, roota['TestRoot'].strdata)
        self.assertEqual(root['TestRoot2'].leftover,
                         roota['TestRoot2'].leftover)
+
+
+
+if __name__ == '__main__':
+ unittest.main()
Property changes on: Products.Ape/trunk/lib/apelib/tests/testio.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testparams.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testparams.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testparams.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,61 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Tests of gateway_fs.params
+
+$Id$
+"""
+
+import unittest
+
+from apelib.fs.params import string_to_params, params_to_string
+
+
class ParamsTests(unittest.TestCase):
    """Round-trip tests of the filesystem parameter string format."""

    def test_string_to_params(self):
        raw = 'abc def="123 456\\n \\"done\\" " ghi=4 j567 \n'
        expected = (
            ('abc', ''),
            ('def', '123 456\n "done" '),
            ('ghi', '4'),
            ('j567', ''),
        )
        self.assertEqual(tuple(string_to_params(raw)), expected)

    def test_params_to_string(self):
        pairs = (
            ('abc', ''),
            ('def', '123 456\n "done" '),
            ('ghi', '4'),
            ('j567', ''),
        )
        expected = 'abc def="123 456\\n \\"done\\" " ghi="4" j567'
        self.assertEqual(params_to_string(pairs), expected)

    def test_invalid_keys(self):
        # A key made only of legal characters must not raise.
        params_to_string((('abc_-09ABC', ''),))
        # Whitespace, quotes, empty keys, and leading digits are illegal.
        for bad_key in ('a bc', 'a\nbc', '', ' abc', 'abc ',
                        'a\tbc', 'a\rbc', 'a"bc', '0abc'):
            self.assertRaises(
                ValueError, params_to_string, ((bad_key, ''),))
+
+
+if __name__ == '__main__':
+ unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testparams.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testscanner.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testscanner.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testscanner.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,177 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Cache scanner tests
+
+$Id$
+"""
+
+import unittest
+from time import time
+
+from apelib.zodb3.scanner import PoolScanControl, Scanner
+
+
class FakeRepository:
    """Stand-in repository whose poll() always reports a change."""

    def poll(self, d):
        """Return a new state (1001) for every (repo, location) key of d.

        Sanity-checks that each source refers to this repository and
        that each location is a string.
        """
        res = {}
        for source in d.keys():
            repo, location = source
            if repo is not self:
                # Call form of raise: the comma form used previously is
                # Python-2-only and inconsistent with the raise below.
                raise AssertionError("repo must be self")
            if str(location) != location:
                raise AssertionError(
                    "location %s is not a string" % repr(location))
            # Always report a change
            res[source] = 1001
        return res
+
+
class FakeStorage:
    """Stand-in storage reporting one fake source per oid."""

    repo = FakeRepository()

    def get_all_sources(self, oids):
        """Return {oid: {(repo, str(oid)): 10}} for each requested oid."""
        return dict([(oid, {(self.repo, str(oid)): 10}) for oid in oids])
+
+
class ScanControlTests(unittest.TestCase):
    """Tests of PoolScanControl's per-connection oid bookkeeping."""

    def setUp(self):
        # Wire a fake storage and a Scanner to a pool scan control with
        # two pool connections.
        storage = self.storage = FakeStorage()
        scanner = self.scanner = Scanner()
        storage.scanner = scanner
        scanner.storage = storage
        ctl = self.ctl = PoolScanControl(storage)
        self.conn1 = ctl.new_connection()
        self.conn2 = ctl.new_connection()

    def test_set_new_oids(self):
        self.conn1.set_oids([5, 8])
        oids = list(self.ctl.oids.keys())
        self.assertEqual(oids, [5, 8])
        self.assertEqual(list(self.ctl.conn_oids.keys()), [self.conn1.conn_id])

    def test_set_multiple_connection_oids(self):
        # oids from both connections are merged in ctl.oids.
        self.conn1.set_oids([5, 8])
        self.conn2.set_oids([8, 9])
        oids = list(self.ctl.oids.keys())
        self.assertEqual(oids, [5,8,9])
        conns = list(self.ctl.conn_oids.keys())
        self.assertEqual(conns, [self.conn1.conn_id, self.conn2.conn_id])

    def test_remove_oids(self):
        self.conn1.set_oids([5, 8])
        self.conn2.set_oids([8, 9])
        # Shrinking conn1 to [8] drops oid 5, which no connection holds.
        self.conn1.set_oids([8])
        oids = list(self.ctl.oids.keys())
        self.assertEqual(oids, [8,9])
        conns = list(self.ctl.conn_oids.keys())
        self.assertEqual(conns, [self.conn1.conn_id, self.conn2.conn_id])

        # Emptying conn1 removes it from conn_oids entirely.
        self.conn1.set_oids([])
        oids = list(self.ctl.oids.keys())
        self.assertEqual(oids, [8,9])
        self.assertEqual(list(self.ctl.conn_oids.keys()), [self.conn2.conn_id])
+
+
class ScannerTests(unittest.TestCase):
    """Tests of the Scanner's source tracking and change detection."""

    def setUp(self):
        storage = self.storage = FakeStorage()
        scanner = self.scanner = Scanner()
        storage.scanner = scanner
        scanner.storage = storage
        ctl = self.ctl = PoolScanControl(storage)
        self.conn1 = ctl.new_connection()
        self.conn2 = ctl.new_connection()
        self.repo = FakeRepository()

    def test_add_source(self):
        # after_load() records sources in scanner.future, not current.
        new_sources = {(self.repo, '5'): 0}
        self.scanner.after_load(5, new_sources)
        self.assertEqual(len(self.scanner.future), 1)
        self.assertEqual(self.scanner.future[5][0], new_sources)

    def test_no_updates_when_not_invalidating(self):
        # Don't change current except in scan(), where invalidation
        # messages are possible.
        self.conn1.set_oids([5])

        sources = {(self.repo, '5'): 0}
        self.scanner.after_load(5, sources)
        self.assertNotEqual(self.scanner.current[5], sources)

    def test_remove_oid(self):
        # Dropping the last reference to an oid clears it from current.
        self.conn1.set_oids([5])
        self.assertEqual(len(self.scanner.current), 1)
        self.conn1.set_oids([])
        self.assertEqual(len(self.scanner.current), 0)

    def test_scan(self):
        # FakeRepository.poll() always reports a change, so the single
        # tracked oid must come back for invalidation.
        self.conn1.set_oids([5])
        new_sources = {(self.repo, '6'): 0, (self.repo, '7'): 0, }
        self.scanner.after_load(5, new_sources)
        to_invalidate = self.scanner.scan()
        self.assertEqual(len(to_invalidate), 1)

    def test_pool_scan(self):
        self.conn1.set_oids([5])
        new_sources = {(self.repo, '6'): 0, (self.repo, '7'): 0, }
        self.scanner.after_load(5, new_sources)
        # Just test that ctl.scan() executes without error.
        self.ctl.scan()

    def test_prune_future(self):
        # Simulate some data.
        self.scanner.future[5] = ([], time()) # Should not be pruned
        self.scanner.future[900] = ([], time() - 100000) # Should be pruned
        self.scanner.prune_future()
        self.assertEqual(len(self.scanner.future), 1)
        self.assert_(self.scanner.future.has_key(5))

    def test_find_new_sources(self):
        # Verify the scanner calls storage.getSources() and saves the result.
        self.conn1.set_oids([5])
        expect_sources = self.storage.get_all_sources([5])[5]
        self.assertEqual(self.scanner.current[5], expect_sources)

    def test_use_cached_sources(self):
        # Verify the scanner uses previously cached sources when available.
        repo = FakeRepository()
        sources = {(repo, '999'): -1}
        self.scanner.after_load(5, sources)
        self.conn1.set_oids([5])
        self.assertEqual(self.scanner.current[5], sources)

    def test_use_committed_sources(self):
        # Verify the scanner updates sources according to transactions.
        repo = FakeRepository()
        sources = {(repo, '999'): -1}
        self.scanner.after_load(5, sources)
        self.conn1.set_oids([5])
        sources_2 = {(repo, '999'): -2}
        self.scanner.changed_sources(5, sources_2)
        final_sources = self.scanner.current[5]
        self.assertEqual(len(final_sources), 1)
        self.assertEqual(final_sources.keys()[0], (repo, '999'))
        self.assertEqual(final_sources.values()[0], -2)
+
+if __name__ == '__main__':
+ unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testscanner.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testserialization.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testserialization.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testserialization.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,141 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Serialization tests
+
+$Id$
+"""
+
+import unittest
+
+import ZODB
+from Persistence import PersistentMapping
+
+from apelib.core.events \
+ import LoadEvent, StoreEvent, SerializationEvent, DeserializationEvent
+from apelib.core.interfaces import SerializationError
+from serialtestbase import SerialTestBase, TestObject
+
+
class SimpleClass:
    """Represents second-class persistent objects.

    Carries an arbitrary ``data`` payload; used by the shared-attribute
    test as a value reachable via two paths from one persistent object.
    """
    def __init__(self, data):
        self.data = data
+
+
class MockObjectDatabase:
    """Implements only enough to satisfy test_catch_extra_attribute.
    """
    def identify(self, obj):
        # Returning None signals that obj has no known oid.
        return None
+
+
class SerializationTests(SerialTestBase, unittest.TestCase):
    """Tests of basic events, serializers, and gateways.

    No connections or object databases are provided.
    """

    def test_serialize_and_deserialize(self):
        # Round-trip through the "tm" mapper's serializer only (no
        # gateway involved).
        classification = None
        ob = TestObject()
        ob['a'] = 'b'
        ob['c'] = 'd'
        obj_db = None
        m = self.conf.mappers["tm"]
        event = SerializationEvent(
            self.conf, m, '', classification, obj_db, ob)
        full_state = m.serializer.serialize(event)
        ob2 = TestObject()
        event = DeserializationEvent(
            self.conf, m, '', classification, obj_db, ob2)
        m.serializer.deserialize(event, full_state)
        self.assertEqual(ob.strdata, ob2.strdata)
        self.assertEqual(ob.data, ob2.data)

    def test_store_and_load(self):
        # Round-trip through both the serializer and the RAM gateway.
        classification = None
        ob = TestObject()
        ob.strdata = '345'
        ob['a'] = 'b'
        ob['c'] = 'd'
        obj_db = None
        m = self.conf.mappers["tm"]
        event = SerializationEvent(
            self.conf, m, '', classification, obj_db, ob)
        full_state = m.serializer.serialize(event)
        event = StoreEvent(
            self.conf, m, '', classification, self.conns, True)
        m.gateway.store(event, full_state)

        event = LoadEvent(
            self.conf, m, '', classification, self.conns)
        full_state, serial = m.gateway.load(event)
        ob2 = TestObject()
        event = DeserializationEvent(
            self.conf, m, '', classification, obj_db, ob2)
        m.serializer.deserialize(event, full_state)
        self.assertEqual(ob.strdata, ob2.strdata)
        self.assertEqual(ob.data, ob2.data)

    def test_catch_extra_attribute(self):
        # The mapper for PersistentMappings doesn't allow an
        # extra attribute.
        classification = None
        ob = PersistentMapping()
        ob.extra = '678'
        ob['a'] = 'b'
        ob['c'] = 'd'
        obj_db = MockObjectDatabase()
        m = self.conf.mappers["pm"]
        event = SerializationEvent(
            self.conf, m, '', classification, obj_db, ob)
        self.assertRaises(SerializationError, m.serializer.serialize, event)

    def test_shared_attribute(self):
        # Test of an attribute shared between a normal serializer and
        # a remainder serializer.
        classification = None
        ob = TestObject()
        data = SimpleClass('This is a shared piece of data')
        ob.extra = data
        ob['a'] = data
        obj_db = None
        m = self.conf.mappers["tm"]
        event = SerializationEvent(
            self.conf, m, '', classification, obj_db, ob)
        full_state = m.serializer.serialize(event)
        event = StoreEvent(
            self.conf, m, '', classification, self.conns, True)
        m.gateway.store(event, full_state)

        # Now load the state into a different object
        event = LoadEvent(
            self.conf, m, '', classification, self.conns)
        full_state, serial = m.gateway.load(event)
        ob2 = TestObject()
        event = DeserializationEvent(
            self.conf, m, '', classification, obj_db, ob2)
        m.serializer.deserialize(event, full_state)
        self.assertEqual(ob.extra.data, ob2.extra.data)
        self.assertEqual(ob.keys(), ob2.keys())

        # Check that both ways to access the SimpleClass instance
        # result in the same object.
        self.assert_(ob2['a'] is ob2.extra, (ob2['a'], ob2.extra))
        self.assert_(ob2['a'] is not data) # Verify it didn't cheat somehow
+
+
+if __name__ == '__main__':
+ unittest.main()
Property changes on: Products.Ape/trunk/lib/apelib/tests/testserialization.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testsqlimpl.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testsqlimpl.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testsqlimpl.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,33 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interface implementation tests
+
+$Id$
+"""
+
+import unittest
+
+from testimpl import InterfaceImplChecker
+
+
class ApelibSQLImplTests(InterfaceImplChecker, unittest.TestCase):
    """Verify __implements__ declarations in the apelib.sql package."""

    def test_sql_implementations(self):
        import apelib.sql
        # Imported explicitly in addition to the package scan —
        # presumably to surface import errors early; TODO confirm.
        import apelib.sql.oidgen
        self._test_all_in_package(apelib.sql)
+
+if __name__ == '__main__':
+ unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testsqlimpl.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/teststorage.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/teststorage.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/teststorage.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,408 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Storage tests (with data stored in simple mappings)
+
+$Id$
+"""
+
+import unittest
+from thread import start_new_thread, allocate_lock
+
+import ZODB
+from Persistence import Persistent, PersistentMapping
+
+from apelib.zodb3.db import ApeDB
+from apelib.zodb3.storage import ApeStorage
+from apelib.zodb3.resource import StaticResource
+from apelib.zodb3.utils import zodb_copy
+from apelib.core.interfaces import OIDConflictError
+from serialtestbase import SerialTestBase, TestObject
+
+
+def run_in_thread(f):
+ """Calls a function in another thread and waits for it to finish."""
+ lock = allocate_lock()
+ def run(f=f, lock=lock):
+ try:
+ f()
+ finally:
+ lock.release()
+ lock.acquire()
+ start_new_thread(run, ())
+ lock.acquire()
+ lock.release()
+
+
+class ApeStorageTests (SerialTestBase, unittest.TestCase):
+ # Tests of ApeStorage and ApeConnection.
+
+ def setUp(self):
+ SerialTestBase.setUp(self)
+ resource = StaticResource(self.conf)
+ storage = ApeStorage(resource, self.conns)
+ self.storage = storage
+ db = ApeDB(storage, resource)
+ self.db = db
+
+ def tearDown(self):
+ get_transaction().abort()
+ self.db.close()
+ SerialTestBase.tearDown(self)
+
+ def test_store_and_load(self):
+ ob = TestObject()
+ ob.strdata = '345'
+ ob['a'] = 'b'
+ ob['c'] = 'd'
+
+ conn1 = self.db.open()
+ conn2 = None
+ conn3 = None
+ try:
+
+ # Load the root and create a new object
+ root = conn1.root()
+ get_transaction().begin()
+ root['TestRoot'] = ob
+ get_transaction().commit()
+ ob1 = conn1.root()['TestRoot']
+ self.assertEqual(ob1.strdata, ob.strdata)
+ self.assertEqual(ob1.items(), ob.items())
+
+ # Verify a new object was stored and make a change
+ get_transaction().begin()
+ conn2 = self.db.open()
+ ob2 = conn2.root()['TestRoot']
+ self.assertEqual(ob2.strdata, ob.strdata)
+ self.assertEqual(ob2.items(), ob.items())
+ ob2.strdata = '678'
+ get_transaction().commit()
+
+ # Verify the change was stored and make another change
+ conn3 = self.db.open()
+ ob3 = conn3.root()['TestRoot']
+ self.assertEqual(ob3.strdata, '678')
+ self.assertEqual(ob3.items(), ob.items())
+ ob3.strdata = '901'
+ get_transaction().commit()
+ conn3.close()
+ conn3 = None
+ conn3 = self.db.open()
+ ob3 = conn3.root()['TestRoot']
+ self.assertEqual(ob3.strdata, '901')
+
+ # Verify we didn't accidentally change the original object
+ self.assertEqual(ob.strdata, '345')
+
+ finally:
+ conn1.close()
+ if conn2 is not None:
+ conn2.close()
+ if conn3 is not None:
+ conn3.close()
+
+
+ def test_unmanaged(self):
+ ob = TestObject()
+ ob['a'] = 'b'
+ ob.stowaway = PersistentMapping()
+ ob.stowaway['c'] = 'd'
+
+ conn1 = self.db.open()
+ conn2 = None
+ conn3 = None
+ try:
+
+ # Load the root and create a new object
+ root = conn1.root()
+ get_transaction().begin()
+ root['TestRoot2'] = ob
+ get_transaction().commit()
+ ob1 = conn1.root()['TestRoot2']
+ self.assert_(ob1 is ob)
+ self.assertEqual(ob1.items(), [('a', 'b')])
+ self.assertEqual(ob1.stowaway.items(), [('c', 'd')])
+
+ # Verify a new object was stored
+ get_transaction().begin()
+ conn2 = self.db.open()
+ ob2 = conn2.root()['TestRoot2']
+ self.assertEqual(ob2.items(), [('a', 'b')])
+ self.assertEqual(ob2.stowaway.items(), [('c', 'd')])
+
+ # Make a change only to the unmanaged persistent object
+ # (the "stowaway").
+ ob.stowaway['c'] = 'e'
+ get_transaction().commit()
+
+ # Verify the change was stored and make a change to the
+ # managed persistent object.
+ conn3 = self.db.open()
+ ob3 = conn3.root()['TestRoot2']
+ self.assertEqual(ob3.items(), [('a', 'b')])
+ self.assertEqual(ob3.stowaway.items(), [('c', 'e')])
+ ob3['a'] = 'z'
+ get_transaction().commit()
+ conn3.close()
+ conn3 = None
+ conn3 = self.db.open()
+ ob3 = conn3.root()['TestRoot2']
+ self.assertEqual(ob3['a'], 'z')
+ self.assertEqual(ob3.stowaway.items(), [('c', 'e')])
+
+ # Verify we didn't accidentally change the original object.
+ self.assertEqual(ob['a'], 'b')
+
+ # sync and verify the current state.
+ conn1.sync()
+ self.assertEqual(ob1.items(), [('a', 'z')])
+ self.assertEqual(ob1.stowaway.items(), [('c', 'e')])
+
+ finally:
+ conn1.close()
+ if conn2 is not None:
+ conn2.close()
+ if conn3 is not None:
+ conn3.close()
+
+
+ def test_store_and_load_binary(self):
+ ob = TestObject()
+ # strdata contains binary characters
+ ob.strdata = ''.join([chr(n) for n in range(256)]) * 2
+
+ conn1 = self.db.open()
+ try:
+ root = conn1.root()
+ get_transaction().begin()
+ root['TestRoot'] = ob
+ get_transaction().commit()
+ ob1 = conn1.root()['TestRoot']
+ self.assertEqual(ob1.strdata, ob.strdata)
+ self.assertEqual(ob1.items(), ob.items())
+ finally:
+ conn1.close()
+
+
+ def _write_basic_object(self, conn):
+ ob = TestObject()
+ ob.strdata = 'abc'
+ root = conn.root()
+ get_transaction().begin()
+ root['TestRoot'] = ob
+ get_transaction().commit()
+ return ob
+
+
+ def _change_test_root(self):
+ conn = self.db.open()
+ try:
+ ob = conn.root()['TestRoot']
+ ob.strdata = 'ghi'
+ get_transaction().commit()
+ finally:
+ conn.close()
+
+
+ def test_conflict_detection(self):
+ conn1 = self.db.open()
+ try:
+ ob1 = self._write_basic_object(conn1)
+ ob1.strdata = 'def'
+ run_in_thread(self._change_test_root)
+ # Don't let the Connection generate the conflict. This is
+ # a test of the storage.
+ conn1._invalidated.clear()
+ # Verify that "def" doesn't get written, since it
+ # conflicts with "ghi".
+ self.assertRaises(ZODB.POSException.ConflictError,
+ get_transaction().commit)
+ self.assertEqual(ob1.strdata, "ghi")
+ finally:
+ conn1.close()
+
+
+ def test_debug_conflict_errors(self):
+ # When debug_conflicts is on, ApeStorage generates a
+ # RuntimeError with information instead of a simple
+ # ConflictError, making it easier to pinpoint the problem.
+ self.storage.set_debug_conflicts(1)
+ conn1 = self.db.open()
+ try:
+ ob1 = self._write_basic_object(conn1)
+ ob1.strdata = 'def'
+ run_in_thread(self._change_test_root)
+ # Don't let the Connection generate the conflict. This is
+ # a test of the storage.
+ conn1._invalidated.clear()
+ self.assertRaises(RuntimeError, get_transaction().commit)
+ finally:
+ conn1.close()
+
+
+ def test_new_object_conflict_detection(self):
+ # Verify a new object won't overwrite existing objects by accident
+ conn1 = self.db.open()
+ try:
+ ob1 = self._write_basic_object(conn1)
+ ob1.strdata = 'def'
+ conn1._set_serial(ob1, '\0' * 8) # Pretend that it's new
+ self.assertRaises(OIDConflictError, get_transaction().commit)
+ finally:
+ conn1.close()
+
+
+ def test_remainder_cyclic_reference_restoration(self):
+ # test whether the remainder pickler properly stores cyclic references
+ # back to the object itself.
+ ob1 = TestObject()
+ ob1.myself = ob1
+
+ conn1 = self.db.open()
+ try:
+ root = conn1.root()
+ get_transaction().begin()
+ root['TestRoot2'] = ob1
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ try:
+ ob2 = conn2.root()['TestRoot2']
+ self.assert_(ob2.myself is ob2)
+ self.assert_(ob2 is not ob1) # Verify it didn't cheat somehow
+ finally:
+ conn2.close()
+ finally:
+ conn1.close()
+
+
+ def test_copy_of(self):
+ # Verifies the functionality of zodb_copy().
+ ob1 = PersistentMapping()
+ ob1._p_oid = 'xxx'
+ self.assertEqual(ob1._p_oid, 'xxx') # Precondition
+ ob1['fish'] = PersistentMapping()
+ ob1['fish']['trout'] = 1
+ ob1['fish']['herring'] = 2
+
+ ob2 = zodb_copy(ob1)
+ self.assert_(ob2 is not ob1)
+ self.assert_(ob2['fish'] is not ob1['fish'])
+ self.assert_(ob2._p_oid is None)
+ self.assertEqual(list(ob2.keys()), ['fish'])
+ self.assertEqual(len(ob2['fish'].keys()), 2)
+
+
+ def test_copy_of_zclass_instance(self):
+ # Verifies that zodb_copy() can copy instances that look like ZClass
+ # instances.
+ class weird_class (Persistent):
+ pass
+ weird_class.__module__ = '*IAmAZClassModule'
+ self.assertEqual(weird_class.__module__, '*IAmAZClassModule')
+
+ ob1 = PersistentMapping()
+ ob1['fishy'] = weird_class()
+
+ ob2 = zodb_copy(ob1)
+ self.assert_(ob2 is not ob1)
+ self.assert_(ob2['fishy'] is not ob1['fishy'])
+ self.assert_(ob2['fishy'].__class__ is weird_class)
+
+
+ def test_p_serial_untouched(self):
+ # _p_serial isn't safe to use for hashes, since _p_mtime
+ # interprets it as a date stamp. Verify Ape doesn't
+ # use _p_serial for hashes.
+ conn1 = self.db.open()
+ try:
+ ob1 = self._write_basic_object(conn1)
+ self.assertEqual(ob1._p_serial, "\0" * 8)
+ finally:
+ conn1.close()
+
+
+ def test_get_serial(self):
+ # Verifies the behavior of _get_serial().
+ conn1 = self.db.open()
+ try:
+ new_ob = TestObject()
+ self.assertEqual(conn1._get_serial(new_ob), '\0' * 8)
+ ob1 = self._write_basic_object(conn1)
+ self.assertNotEqual(conn1._get_serial(ob1), '\0' * 8)
+ finally:
+ conn1.close()
+
+
+ def test_get_serial_detects_new_objects(self):
+ # Verifies the behavior of _get_serial() and _set_serial().
+ conn1 = self.db.open()
+ try:
+ ob = self._write_basic_object(conn1)
+ self.assertNotEqual(conn1._get_serial(ob), '\0' * 8)
+ # Replace the object and verify it gets a new serial.
+ ob1 = PersistentMapping()
+ ob1.strdata = 'cba'
+ ob1._p_oid = conn1.root()['TestRoot']._p_oid
+ conn1.root()['TestRoot'] = ob1
+ self.assertEqual(conn1._get_serial(ob1), '\0' * 8)
+ finally:
+ conn1.close()
+
+
+ def test_serial_cleanup(self):
+ # Verify that _set_serial() cleans up.
+ conn1 = self.db.open()
+ try:
+ conn1.serial_cleanup_threshold = 10
+ for n in range(conn1.serial_cleanup_threshold + 1):
+ new_ob = PersistentMapping()
+ new_ob._p_oid = 'fake_oid_' + str(n)
+ old_size = len(conn1._serials or ())
+ conn1._set_serial(new_ob, '01234567')
+ new_size = len(conn1._serials)
+ if new_size < old_size:
+ # Cleaned up. Success.
+ break
+ else:
+ self.fail("_set_serial() did not clean up")
+ finally:
+ conn1.close()
+
+
+ def test_get_all_sources(self):
+ root_oid = self.conf.oid_gen.root_oid
+ sources = self.storage.get_all_sources([root_oid])
+ self.assert_(not sources[root_oid])
+ # The test passed, but check for a false positive.
+ oid = 'nonexistent-oid'
+ self.assertRaises(KeyError, self.storage.get_all_sources, [oid])
+
+
+ def test_clean_changed(self):
+ # Verify the storage discards the list of changed objects on
+ # commit or abort.
+ conn1 = self.db.open()
+ try:
+ ob1 = self._write_basic_object(conn1)
+ self.assertEqual(len(self.storage.changed), 0)
+ ob1.strdata = 'def'
+ get_transaction().abort()
+ self.assertEqual(len(self.storage.changed), 0)
+ finally:
+ conn1.close()
+
+
+if __name__ == '__main__':
+ unittest.main()
Property changes on: Products.Ape/trunk/lib/apelib/tests/teststorage.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testzodbtables.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testzodbtables.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testzodbtables.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,254 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""zodbtables tests.
+
+$Id$
+"""
+
+import unittest
+from time import time
+
+from apelib.zodb3 import zodbtables
+
+
+TEST_DATA = [
+ {'name': 'Jose',
+ 'sex': 'm',
+ 'address': '101 Example St.',
+ 'phone': '123-4567',
+ },
+ {'name': 'Maria',
+ 'sex': 'f',
+ 'address': '102 Example St.',
+ },
+ {'name': 'Carlos',
+ 'sex': 'm',
+ 'phone': '987-6543',
+ },
+ {'name': 'Tiago',
+ 'sex': 'm',
+ 'phone': '123-4567',
+ },
+ {'name': 'Ana',
+ 'sex': 'f',
+ 'phone': '123-4567',
+ },
+ ]
+
+
+class ZODBTableTests(unittest.TestCase):
+
+ table_schema = zodbtables.TableSchema()
+ table_schema.add('name', primary=1, indexed=1)
+ table_schema.add('sex', indexed=1)
+ table_schema.add('address')
+ table_schema.add('phone', indexed=1)
+
+ def setUp(self):
+ self.table = table = zodbtables.Table(self.table_schema)
+ for data in TEST_DATA:
+ table.insert(data)
+
+ def tearDown(self):
+ get_transaction().abort()
+
+ def test_select_by_name(self):
+ # Searches by primary key
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['address'], '101 Example St.')
+
+ def test_select_by_unknown_name(self):
+ # Searches by primary key
+ records = self.table.select({'name': 'Joao'})
+ self.assertEqual(len(records), 0)
+
+ def test_select_by_phone(self):
+ # Searches by index
+ records = self.table.select({'phone': '987-6543'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['name'], 'Carlos')
+
+ def test_select_by_address(self):
+ # Searches one-by-one
+ records = self.table.select({'address': '102 Example St.'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['name'], 'Maria')
+
+ def test_select_males(self):
+ records = self.table.select({'sex': 'm'})
+ self.assertEqual(len(records), 3)
+
+ def test_select_females(self):
+ records = self.table.select({'sex': 'f'})
+ self.assertEqual(len(records), 2)
+
+ def test_select_by_name_and_sex(self):
+ records = self.table.select({'name': 'Jose', 'sex': 'm'})
+ self.assertEqual(len(records), 1)
+
+ def test_select_by_name_and_incorrect_sex(self):
+ records = self.table.select({'name': 'Jose', 'sex': 'f'})
+ self.assertEqual(len(records), 0)
+
+ def test_Select_By_Sex_And_Phone(self):
+ # Intersects two indexes
+ records = self.table.select({'phone': '123-4567', 'sex': 'm'})
+ self.assertEqual(len(records), 2)
+
+ def test_select_all(self):
+ records = self.table.select({})
+ self.assertEqual(len(records), 5)
+
+ def test_insert_minimal(self):
+ self.table.insert({'name': 'Edson'})
+
+ def test_insert_duplicate(self):
+ self.assertRaises(zodbtables.DuplicateError,
+ self.table.insert, {'name':'Carlos'})
+
+ def test_insert_without_primary_key(self):
+ self.assertRaises(ValueError, self.table.insert, {})
+
+ def test_update_new_address(self):
+ # Test adding a value in a non-indexed column
+ self.table.update({'name': 'Carlos'}, {'address': '99 Sohcahtoa Ct.'})
+ records = self.table.select({'address': '99 Sohcahtoa Ct.'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['name'], 'Carlos')
+
+ def test_Update_Change_Address(self):
+ # Test changing a value in a non-indexed column
+ self.table.update({'name': 'Jose'}, {'address': '99 Sohcahtoa Ct.'})
+ records = self.table.select({'address': '99 Sohcahtoa Ct.'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['name'], 'Jose')
+
+ def test_update_female_addresses(self):
+ # Test changing and adding simultaneously in a non-indexed column
+ self.table.update({'sex': 'f'}, {'address': '99 Sohcahtoa Ct.'})
+ records = self.table.select({'address': '99 Sohcahtoa Ct.'})
+ self.assertEqual(len(records), 2)
+
+
+ def test_update_change_phone(self):
+ # Test changing a value in an indexed column
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 3) # Precondition
+
+ self.table.update({'name': 'Jose'}, {'phone': '111-5555'})
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 2)
+ records = self.table.select({'phone': '111-5555'})
+ self.assertEqual(len(records), 1)
+ self.assertEqual(records[0]['name'], 'Jose')
+
+
+ def test_update_change_name(self):
+ # Test changing a value in a primary key column
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 1) # Precondition
+
+ self.table.update({'name': 'Jose'}, {'name': 'Marco'})
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 0)
+ records = self.table.select({'name': 'Marco'})
+ self.assertEqual(len(records), 1)
+
+
+ def test_update_name_conflict(self):
+ self.assertRaises(zodbtables.DuplicateError, self.table.update,
+ {'name':'Jose'}, {'name': 'Carlos'})
+
+
+ def test_delete_nothing(self):
+ old_count = len(self.table.select({}))
+ self.assertEqual(self.table.delete({'name': 'Edson'}), 0)
+ new_count = len(self.table.select({}))
+ self.assert_(old_count == new_count)
+
+
+ def test_delete_all(self):
+ count = len(self.table.select({}))
+ self.assert_(count > 0)
+ self.assertEqual(self.table.delete({}), count)
+ new_count = len(self.table.select({}))
+ self.assert_(new_count == 0)
+
+
+ def test_delete_one(self):
+ # Test deletion of one row
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 1) # Precondition
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 3) # Precondition
+
+ count = self.table.delete({'name': 'Jose'})
+ self.assertEqual(count, 1)
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 0)
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 2)
+
+
+ def test_delete_by_phone(self):
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 3) # Precondition
+
+ count = self.table.delete({'phone': '123-4567'})
+ self.assertEqual(count, 3)
+ records = self.table.select({'phone': '123-4567'})
+ self.assertEqual(len(records), 0)
+ records = self.table.select({'name': 'Jose'})
+ self.assertEqual(len(records), 0)
+
+ # Make sure it didn't delete other data
+ records = self.table.select({'name': 'Maria'})
+ self.assertEqual(len(records), 1)
+
+ def test_select_partial_primary_key(self):
+ # Select by only one part of a primary key
+ schema = zodbtables.TableSchema()
+ schema.add('name', primary=1)
+ schema.add('id', primary=1)
+ table = zodbtables.Table(schema)
+ table.insert({'name': 'joe', 'id': 1})
+ table.insert({'name': 'john', 'id': 2})
+ records = table.select({'name': 'joe'})
+ self.assertEqual(len(records), 1)
+
+
+class ZODBTableTestsWithoutPrimaryKey(ZODBTableTests):
+ # Same tests but with no primary key. The absence of a primary
+ # key affects many branches of the code.
+ table_schema = zodbtables.TableSchema()
+ table_schema.add('name', indexed=1)
+ table_schema.add('sex', indexed=1)
+ table_schema.add('address')
+ table_schema.add('phone', indexed=1)
+
+ # Disabled tests
+ def test_insert_without_primary_key(self):
+ pass
+
+ def test_insert_duplicate(self):
+ pass
+
+ def test_update_name_conflict(self):
+ pass
+
+
+if __name__ == '__main__':
+ unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testzodbtables.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testzope2fs.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testzope2fs.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testzope2fs.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,759 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Test of storing folders on the filesystem via ZODB
+
+$Id$
+"""
+
+import os
+import sys
+from shutil import rmtree
+import unittest
+from tempfile import mktemp
+from cStringIO import StringIO
+
+from OFS.Application import Application
+from OFS.Image import File, manage_addImage, manage_addFile
+from Products.PythonScripts.PythonScript import PythonScript
+from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
+
+from apelib.core.interfaces import OIDConflictError
+from apelib.zodb3.db import ApeDB
+from apelib.zodb3.storage import ApeStorage
+from apelib.zodb3.resource import StaticResource
+from apelib.zope2.mapper import load_conf
+from apelib.fs.interfaces import FSWriteError
+from apelib.fs.connection import FSConnection
+from apelib.tests.zope2testbase import Zope2TestBase, Folder
+
+
+try:
+ __file__
+except NameError:
+ __file__ = os.path.abspath(sys.argv[0])
+
+tmpdir = mktemp()
+
+conf = None
+
+
+class Zope2FSTests (unittest.TestCase, Zope2TestBase):
+
+ annotation_prefix = '.'
+
+ def setUp(self):
+ self.db, self.conn = self.open_database()
+ self.conf = conf
+ self.path = tmpdir
+ c = self.db.open()
+ try:
+ if not c.root().has_key('Application'):
+ from OFS.Application import Application
+ c.root()['Application'] = Application()
+ get_transaction().commit()
+ finally:
+ c.close()
+ get_transaction().begin()
+ self.clear_caches()
+
+ def tearDown(self):
+ get_transaction().abort()
+ if self.db is not None:
+ self.db.close()
+ rmtree(self.path)
+
+ def open_database(self):
+ global conf
+ if conf is None:
+ conf = load_conf('filesystem')
+ if not os.path.exists(tmpdir):
+ os.mkdir(tmpdir)
+ conn = FSConnection(tmpdir, annotation_prefix=self.annotation_prefix)
+ conns = {'fs': conn}
+ resource = StaticResource(conf)
+ storage = ApeStorage(resource, conns)
+ db = ApeDB(storage, resource, cache_size=0)
+ return db, conn
+
+ def clear_caches(self):
+ """Clears caches after a filesystem write.
+ """
+ self.conn.afs.clear_cache()
+
+ def test_classification_preservation(self):
+ # Ensure that classification doesn't get forgotten.
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'Holidays'
+ app._setObject(f.id, f, set_owner=0)
+ get_transaction().commit()
+
+ f2 = Folder()
+ f2.id = 'Christmas'
+ f._setObject(f2.id, f2, set_owner=0)
+ get_transaction().commit()
+
+ f3 = Folder()
+ f3.id = 'Eve'
+ f2._setObject(f3.id, f3, set_owner=0)
+ get_transaction().commit()
+
+ for folder in (f, f2, f3):
+ text = self.conn.read_annotation(folder._p_oid, 'classification')
+ self.assert_(text.find('class_name=OFS.Folder.Folder') >= 0)
+ finally:
+ conn.close()
+
+
+ def test_ignore_mismatched_id(self):
+ # Verify that FSAutoID doesn't care if the ID of an item
+ # doesn't match what the folder thinks the item's ID should
+ # be.
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'Holidays'
+ app._setObject(f.id, f, set_owner=0)
+ get_transaction().commit()
+
+ ob = app.Holidays
+ ob._setId('HolidayCalendar')
+ get_transaction().commit()
+ finally:
+ conn.close()
+
+
+ def test_reuse_path(self):
+ # Verifies that ApeConnection doesn't trip over reuse of a path that's
+ # no longer in use.
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'Holidays'
+ app._setObject(f.id, f, set_owner=0)
+ get_transaction().commit()
+
+ f = None # Forget the reference to folder
+ app._delObject('Holidays')
+ get_transaction().commit()
+
+ f = Folder()
+ f.id = 'Holidays'
+ app._setObject(f.id, f, set_owner=0)
+ get_transaction().commit()
+ finally:
+ conn.close()
+
+
+ def test_automatic_page_template_extension(self):
+ text = '<span tal:content="string:Hello">example</span>'
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ template = ZopePageTemplate('template', text)
+ app._setObject(template.id, template, set_owner=0)
+ get_transaction().commit()
+
+ dir = self.conn.basepath
+ names = os.listdir(dir)
+ self.assert_('template.html' in names, names)
+ self.assert_('template' not in names, names)
+ finally:
+ conn.close()
+
+
+ def test_preserve_names_without_extensions(self):
+ # Verifies that FSConnection retains original object names,
+ # even though the files might be stored with extensions.
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'folder'
+ app._setObject(f.id, f, set_owner=0)
+ for n in range(3):
+ script = PythonScript('script%d' % n)
+ script.write('##title=test script\nreturn "OK"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ try:
+ app = conn2.root()['Application']
+ f = app.folder
+ for n in range(3):
+ self.assert_(hasattr(f, 'script%d' % n))
+ self.assert_(not hasattr(f, 'script%d.py' % n))
+ # white box test: verify the scripts were actually stored
+ # with .py extensions.
+ dir = os.path.join(self.conn.basepath, 'folder')
+ names = os.listdir(dir)
+ for n in range(3):
+ self.assert_(('script%d.py' % n) in names, names)
+ finally:
+ conn2.close()
+ finally:
+ conn.close()
+
+
+ def test_preserve_names_with_extensions(self):
+ # Verifies that FSConnection retains original object names
+ # even though the object names already have extensions.
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'folder'
+ app._setObject(f.id, f, set_owner=0)
+ for n in range(3):
+ script = PythonScript('script%d.py' % n)
+ script.write('##title=test script\nreturn "OK"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ try:
+ app = conn2.root()['Application']
+ f = app.folder
+ for n in range(3):
+ self.assert_(hasattr(f, 'script%d.py' % n))
+ self.assert_(not hasattr(f, 'script%d' % n))
+ # white box test: verify the scripts were actually stored
+ # with .py extensions.
+ dir = os.path.join(self.conn.basepath, 'folder')
+ names = os.listdir(dir)
+ for n in range(3):
+ self.assert_(('script%d.py' % n) in names, names)
+ finally:
+ conn2.close()
+ finally:
+ conn.close()
+
+
+ def test_auto_rename_on_extension_conflict(self):
+ # When you create a Python Script called "script0", Ape adds a
+ # .py extension. If, in a second transaction, you add
+ # "script0.py", Ape must rename the current "script0.py" to
+ # "script0" to make room for the new "script0.py".
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'folder'
+ app._setObject(f.id, f, set_owner=0)
+
+ # Can't write to 'script0' then 'script0.py'.
+ script = PythonScript('script0')
+ script.write('##title=test script\nreturn "OK"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ dir = os.path.join(self.conn.basepath, 'folder')
+ names = os.listdir(dir)
+ self.assert_(('script0.py') in names, names)
+ self.assert_(('script0') not in names, names)
+
+ # script0.py already exists, so Ape should automatically rename.
+ script = PythonScript('script0.py')
+ script.write('##title=test script\nreturn "Hello, world!"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ # Did it write them correctly?
+ text = open(os.path.join(dir, 'script0')).read()
+ self.assert_(text.find('OK') > 0, text)
+ self.assert_(text.find('Hello, world!') < 0, text)
+ text = open(os.path.join(dir, 'script0.py')).read()
+ self.assert_(text.find('OK') < 0, text)
+ self.assert_(text.find('Hello, world!') > 0, text)
+ finally:
+ conn.close()
+
+
+ def test_non_conflicting_name_extensions1(self):
+ # Verifies that FSConnection can write to 'script0.py' then 'script0'
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'folder'
+ app._setObject(f.id, f, set_owner=0)
+
+ # It's OK to write to 'script0.py' then 'script0'.
+ script = PythonScript('script0.py')
+ script.write('##title=test script\nreturn "OK"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ script = PythonScript('script0')
+ script.write('##title=test script\nreturn "Hello, world!"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ dir = os.path.join(self.conn.basepath, 'folder')
+ names = os.listdir(dir)
+ self.assert_(('script0.py') in names, names)
+ self.assert_(('script0') in names, names)
+
+ conn2 = self.db.open()
+ try:
+ app = conn2.root()['Application']
+ f = app.folder
+ self.assertEqual(f['script0.py'](), 'OK')
+ self.assertEqual(f['script0'](), 'Hello, world!')
+ finally:
+ conn2.close()
+ finally:
+ conn.close()
+
+
+ def test_non_conflicting_name_extensions2(self):
+ # Verifies that FSConnection can write to 'script0.py' and 'script0'
+ # at the same time
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = Folder()
+ f.id = 'folder'
+ app._setObject(f.id, f, set_owner=0)
+
+ # It's OK to write to 'script0.py' then 'script0'.
+ script = PythonScript('script0.py')
+ script.write('##title=test script\nreturn "OK"')
+ f._setObject(script.id, script, set_owner=0)
+ script = PythonScript('script0')
+ script.write('##title=test script\nreturn "Hello, world!"')
+ f._setObject(script.id, script, set_owner=0)
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ try:
+ app = conn2.root()['Application']
+ f = app.folder
+ self.assertEqual(f['script0.py'](), 'OK')
+ self.assertEqual(f['script0'](), 'Hello, world!')
+ finally:
+ conn2.close()
+ finally:
+ conn.close()
+
+
    def test_non_conflicting_name_extensions3(self):
        """'script0.py', 'script0.dtml' and 'script0' coexist as files.

        Writes the three similarly-named scripts in one folder, then
        deletes them one at a time, verifying after each step that the
        remaining filesystem files are untouched.
        """
        # Verifies that FSConnection can write to 'script0.py'
        # then 'script0.dtml', then 'script0'.
        # Then verifies that removal of items works correctly.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'folder'
            app._setObject(f.id, f, set_owner=0)

            script = PythonScript('script0.py')
            script.write('##title=test script\nreturn "OK"')
            f._setObject(script.id, script, set_owner=0)
            get_transaction().commit()

            script = PythonScript('script0.dtml')
            script.write('##title=test script\nreturn "No DTML here"')
            f._setObject(script.id, script, set_owner=0)
            get_transaction().commit()

            script = PythonScript('script0')
            script.write('##title=test script\nreturn "Hello, world!"')
            f._setObject(script.id, script, set_owner=0)
            get_transaction().commit()

            # All three names should exist side by side on disk.
            dir = os.path.join(self.conn.basepath, 'folder')
            names = os.listdir(dir)
            self.assert_(('script0.py') in names, names)
            self.assert_(('script0.dtml') in names, names)
            self.assert_(('script0') in names, names)

            # A second connection should load all three objects distinctly.
            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                f2 = app2.folder
                self.assertEqual(f2['script0.py'](), 'OK')
                self.assertEqual(f2['script0.dtml'](), 'No DTML here')
                self.assertEqual(f2['script0'](), 'Hello, world!')
            finally:
                get_transaction().abort()
                conn2.close()

            # Deleting one of the items must not disturb the other files.
            f._delObject('script0.py')
            get_transaction().commit()
            names = os.listdir(dir)
            self.assert_(('script0.py') not in names, names)
            self.assert_(('script0.dtml') in names, names)
            self.assert_(('script0') in names, names)

            f._delObject('script0')
            get_transaction().commit()
            names = os.listdir(dir)
            self.assert_(('script0.py') not in names, names)
            self.assert_(('script0.dtml') in names, names)
            self.assert_(('script0') not in names, names)

            # Re-adding a name that was previously deleted must work.
            script = PythonScript('script0')
            script.write('##title=test script\nreturn "Hello, world!"')
            f._setObject(script.id, script, set_owner=0)
            get_transaction().commit()
            names = os.listdir(dir)
            self.assert_(('script0.py') not in names, names)
            self.assert_(('script0.dtml') in names, names)
            self.assert_(('script0') in names, names)

            f._delObject('script0.dtml')
            get_transaction().commit()
            names = os.listdir(dir)
            self.assert_(('script0.py') not in names, names)
            self.assert_(('script0.dtml') not in names, names)
            self.assert_(('script0') in names, names)
        finally:
            conn.close()
+
+
    def test_image_extension(self):
        """A new Image is stored on disk as 'image.png', not 'image'."""
        # Verify that a new image is stored with the correct extension.
        # 'correct.png' is a fixture file shipped next to this test module.
        path = os.path.join(os.path.dirname(__file__), 'correct.png')
        f = open(path, 'rb')
        try:
            data = f.read()
        finally:
            f.close()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            manage_addImage(app, 'image', StringIO(data))
            get_transaction().commit()

            self.assertEqual(app.image.data, data)
            # A second connection must see the same image data.
            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assertEqual(app.image.data, data)
            finally:
                conn2.close()

            # The filesystem name must carry the .png extension.
            dir = self.conn.basepath
            names = os.listdir(dir)
            self.assert_(('image.png') in names, names)
            self.assert_(('image') not in names, names)
        finally:
            conn.close()
+
+
    def test_corrected_file_extension(self):
        """Filename extensions are derived from content_type when needed."""
        # Verify that certain content_types use the correct filename
        # extension.
        data = 'Hello, world!'
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            # text/plain with no extension: '.txt' should be appended.
            manage_addFile(app, 'hello', StringIO(data),
                           content_type='text/plain')
            # An existing extension should be left alone.
            manage_addFile(app, 'world.dat', StringIO(data),
                           content_type='text/plain')
            # Binary content gets no extension added.
            manage_addFile(app, 'binary_file', StringIO(data),
                           content_type='application/octet-stream')
            get_transaction().commit()

            dir = self.conn.basepath
            names = os.listdir(dir)
            self.assert_(('hello.txt') in names, names)
            self.assert_(('world.dat') in names, names)
            self.assert_(('hello') not in names, names)
            self.assert_(('binary_file') in names, names)
        finally:
            conn.close()
+
+
    def test_guess_type_based_on_extension(self):
        """A '.py' file created on disk loads as a PythonScript."""
        # Verify Zope chooses the right object type for
        # a new object.
        # White box test.
        dir = self.conn.basepath
        f = open(os.path.join(dir, 'test.py'), 'wt')
        f.write('return "Ok!"')
        f.close()
        # The file was created behind Ape's back, so drop caches
        # before looking for it through ZODB.
        self.clear_caches()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            self.assert_(hasattr(app, 'test.py'))
            self.assert_(isinstance(app['test.py'], PythonScript))
            self.assertEqual(app['test.py'](), 'Ok!')
        finally:
            conn.close()
+
+
    def test_guess_type_with_chopped_extension(self):
        """An object_names annotation may strip the extension in Zope.

        The object id becomes 'test' while the file stays 'test.py',
        and the object must still load as a PythonScript.
        """
        # Verify that even though the extension gets stripped off
        # in Zope, Zope still sees the object as it should.
        # White box test.
        dir = self.conn.basepath
        f = open(os.path.join(dir, 'test.py'), 'wt')
        f.write('return "Ok!"')
        f.close()

        # Write a properties annotation mapping the file to the id 'test'.
        f = open(os.path.join(dir, self.conn.afs.annotation_prefix
                              + 'properties'), 'wt')
        f.write('[object_names]\ntest\n')
        f.close()
        self.clear_caches()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            self.assert_(hasattr(app, 'test'))
            self.assert_(isinstance(app.test, PythonScript))
            self.assertEqual(app.test(), 'Ok!')
        finally:
            conn.close()
+
+
    def test_fallback_to_file(self):
        """A file with no recognizable extension loads as an OFS File."""
        # Verify Zope uses a File object for unrecognized files on
        # the filesystem. White box test.
        data = 'data goes here'
        dir = self.conn.basepath
        f = open(os.path.join(dir, 'test'), 'wt')
        f.write(data)
        f.close()
        self.clear_caches()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            self.assert_(hasattr(app, 'test'))
            self.assert_(isinstance(app['test'], File))
            self.assertEqual(str(app['test']), data)
        finally:
            conn.close()
+
+
    def test_default_property_schema(self):
        """A bare directory loads as a Folder with the default properties.

        The default schema must at least provide a 'title' property.
        """
        # Verify Zope uses the default property schema when no properties
        # are set.
        dir = self.conn.basepath
        os.mkdir(os.path.join(dir, 'test'))
        self.clear_caches()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            self.assert_(hasattr(app, 'test'))
            self.assert_(isinstance(app['test'], Folder))
            self.assertEqual(app['test'].title, '')
            props = app['test']._properties
            # for/else: fail only if no 'title' entry was found.
            for p in props:
                if p['id'] == 'title':
                    break
            else:
                self.fail('No title property found')
        finally:
            conn.close()
+
+
    def test_remainder_storage(self):
        """Unmapped attributes (the 'remainder') go to the properties file."""
        # Verify that FSConnection puts the remainder in the properties file
        conn = self.db.open()
        try:
            content = 'tacked_on_data'
            app = conn.root()['Application']
            app._stowaway = content
            get_transaction().commit()

            # Verify the ability to load it
            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                self.assertEqual(app2._stowaway, content)
            finally:
                conn2.close()

            # Verify the stowaway is in the properties file.
            dir = self.conn.basepath
            p = os.path.join(
                dir, self.conn.afs.annotation_prefix + 'properties')
            f = open(p, 'rt')
            data = f.read()
            f.close()
            self.assert_(data.find('_stowaway') >= 0)
        finally:
            conn.close()
+
+
    def test_dotted_names(self):
        """Ids with leading dots commit cleanly.

        They must not be mistaken for annotation (property/remainder)
        files by FSConnection.
        """
        # FSConnection should allow dotted names that don't look like
        # property or remainder files.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = '.Holidays'
            app._setObject(f.id, f, set_owner=0)
            f2 = Folder()
            f2.id = '.Holidays.properties.dat'
            app._setObject(f2.id, f2, set_owner=0)
            get_transaction().commit()
        finally:
            conn.close()
+
+
+ def test_guess_file_content_type(self):
+ # Verify that file content type guessing happens.
+ data = '<html><body>Cool stuff</body></html>'
+ dir = self.conn.basepath
+ f = open(os.path.join(dir, 'testobj'), 'wt')
+ f.write(data)
+ f.close()
+
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ self.assert_(hasattr(app, 'testobj'))
+ self.assertEqual(app.testobj.content_type, 'text/html')
+ finally:
+ conn.close()
+
+
+ def test_write_to_root(self):
+ # Verify it's possible to write to the _root object as well as
+ # the Application object without either one stomping on each
+ # other's data.
+ conn = self.db.open()
+ conn2 = None
+ try:
+ root = conn.root()
+ app = root['Application']
+ root['foo'] = Folder()
+ root['foo'].id = 'foo'
+ app.bar = Folder('bar')
+ app.bar.id = 'bar'
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ root = conn2.root()
+ app = root['Application']
+ self.assert_(root.has_key('foo'))
+ self.assert_(hasattr(app, 'bar'))
+ finally:
+ conn.close()
+ if conn2 is not None:
+ conn2.close()
+
+
    def test_open_existing(self):
        """Re-opening a database over the same directory finds the data."""
        # Verifies that opening an existing database finds the same
        # data.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            app.test_attribute = '123'
            get_transaction().commit()
        finally:
            conn.close()

        # Close the database and open a new one pointing at the same
        # directory.
        self.db.close()
        self.db = None
        self.db, self.conn = self.open_database()
        conn = self.db.open()
        try:
            root = conn.root()
            app = root['Application']
            self.assertEqual(app.test_attribute, '123')
        finally:
            conn.close()
+
+
    def test_no_clobber_on_open(self):
        """Opening a database without a '_root' keeps existing content.

        Simulates a pre-existing data directory by deleting the _root
        directory and root-level annotations, then re-opens and checks
        the previously committed folder is still reachable.
        """
        # Opening a database with no "_root" shouldn't clobber the
        # existing contents.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'bar'
            app._setObject(f.id, f)
            get_transaction().commit()
        finally:
            conn.close()
        self.db.close()
        self.db = None

        # Destroy the _root and the annotations at the app root.
        basepath = self.conn.basepath
        root_p = os.path.join(basepath, '_root')
        if os.path.exists(root_p):
            rmtree(root_p)
        paths = self.conn.afs.get_annotation_paths(basepath)
        for path in paths:
            if os.path.exists(path):
                os.remove(path)

        # Now look for the 'bar' folder.
        self.db, self.conn = self.open_database()
        conn = self.db.open()
        try:
            root = conn.root()
            app = root['Application']
            self.assertEqual(app.bar.id, 'bar')
        finally:
            conn.close()
+
    def test_start_with_empty_database(self):
        """An empty data directory yields a root with no Application."""
        # A new database should not have an Application.
        # Destroy the _root and the annotations at the app root.
        self.db.close()
        self.db = None
        basepath = self.conn.basepath
        # Wipe and recreate the data directory, then reopen.
        rmtree(basepath)
        os.mkdir(basepath)
        self.db, self.conn = self.open_database()
        conn = self.db.open()
        try:
            root = conn.root()
            self.assert_(not root.has_key('Application'))
        finally:
            conn.close()
+
    def test_store_unlinked(self):
        """Committing an object that loses all parents must not break.

        The subtransaction commit (commit(1)) stores 'bar' while it is
        linked; the object is then deleted before the final commit.
        """
        # Storing an object not linked to any parents
        # shouldn't cause problems.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'bar'
            app._setObject(f.id, f)
            get_transaction().commit(1)
            app._delObject(f.id)
            get_transaction().commit()
        finally:
            conn.close()
+
+
+
class Zope2FSUnderscoreTests (Zope2FSTests):
    # Re-runs the whole Zope2FSTests suite with '_' as the
    # filesystem annotation prefix instead of the default.
    annotation_prefix = '_'
+
+
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
+
Property changes on: Products.Ape/trunk/lib/apelib/tests/testzope2fs.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/testzope2sql.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/testzope2sql.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/testzope2sql.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,140 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Test of storing objects in relational databases via ZODB
+
+$Id$
+"""
+
+import unittest
+import sys
+
+from apelib.zodb3.db import ApeDB
+from apelib.zodb3.storage import ApeStorage
+from apelib.zodb3.resource import StaticResource
+from apelib.zope2.mapper import load_conf
+from apelib.tests.zope2testbase import Zope2TestBase
+
+
+conf = None
+
class Zope2SQLTests (Zope2TestBase):
    """Base class for running the Zope 2 tests against a SQL backend.

    Concrete subclasses supply the DB-API module name, the dotted name
    of the apelib connection class, and a connect expression.
    """

    dbapi_module = None  # Name of the Database API module (required)
    class_name = None  # Dotted name of the apelib connection class
    connect_expression = ''  # Expression evaluated to open a connection

    def get_connection(self):
        """Import self.class_name and instantiate the connection class.

        Tables are created with the 'apetest_' prefix so test data is
        isolated from any real data in the same database.
        """
        pos = self.class_name.rfind('.')
        m = self.class_name[:pos]
        cn = self.class_name[pos + 1:]
        # ('__doc__',) in fromlist makes __import__ return the leaf module.
        c = getattr(__import__(m, {}, {}, ('__doc__',)), cn)
        return c(self.dbapi_module, self.connect_expression, prefix="apetest_")

    def setUp(self):
        """Open a fresh ApeDB over SQL storage and ensure an Application."""
        global conf
        if conf is None:
            conf = load_conf('sql')
        conn = self.get_connection()
        self.conf = conf
        resource = StaticResource(self.conf)
        self.conns = {'db': conn}
        # clear_all wipes any leftover test tables from a prior run.
        storage = ApeStorage(resource, self.conns, clear_all=1,
                             debug_conflicts=1)
        self.storage = storage
        self.assertEqual(conn.transaction_started, False)
        self.db = ApeDB(storage, resource)
        try:
            c = self.db.open()
            try:
                if not c.root().has_key('Application'):
                    from OFS.Application import Application
                    c.root()['Application'] = Application()
                    get_transaction().commit()
            finally:
                get_transaction().abort()
                c.close()
        except:
            # Don't leak the database if bootstrap fails.
            self.db.close()
            raise
        # setUp must leave no database transaction open.
        self.assertEqual(conn.transaction_started, False)

    def clear(self):
        """Drop and recreate the test tables."""
        self.storage.init_databases(clear_all=1)

    def tearDown(self):
        """Abort any open transaction, clear test tables, close the db."""
        get_transaction().abort()
        try:
            self.clear()
        finally:
            # Close the database even if clear() raises, so a failing
            # test doesn't leak connections into later tests.
            self.db.close()

    def test_connect(self):
        # Tests the setUp/tearDown methods
        pass

    def test_commit_transaction_after_read(self):
        """Reading must not leave a database transaction open."""
        # After a read, the database transaction should not remain
        # open.
        conn = self.db.open()
        try:
            conn._resetCache()
            self.assertEqual(self.conns['db'].transaction_started, False)
            app = conn.root()['Application']
            # Touching the ghost loads state, starting a transaction...
            app.getId()
            self.assertEqual(self.conns['db'].transaction_started, True)
            # ...which commit() must end.
            get_transaction().commit()
            self.assertEqual(self.conns['db'].transaction_started, False)
        finally:
            conn.close()
+
+
class PsycopgTests (Zope2SQLTests, unittest.TestCase):
    # Concrete runner: PostgreSQL via the 'psycopg' DB-API module.
    dbapi_module = 'psycopg'
    class_name = 'apelib.sql.postgresql.PostgreSQLConnection'
    connect_expression = 'connect("")'
+
+
class MySQLTests (Zope2SQLTests, unittest.TestCase):
    # Concrete runner: MySQL via the 'MySQLdb' DB-API module,
    # using the database named 'ape'.
    dbapi_module = 'MySQLdb'
    class_name = 'apelib.sql.mysql.MySQLConnection'
    connect_expression = 'connect(db="ape")'
+
+
def test_suite():
    """Makes a test suite for the available databases.

    A test class is included only when its DB-API module imports and a
    trial connection succeeds; otherwise a warning is printed to stderr
    and the class is skipped.
    """
    suite = unittest.TestSuite()
    for name, cls in globals().items():
        mod_name = getattr(cls, 'dbapi_module', None)
        if mod_name is None:
            # Not a database test class.
            continue
        try:
            __import__(mod_name, {}, {}, ('__doc__',))
        except ImportError:
            sys.stderr.write('Warning: could not import %s. '
                             'Skipping %s.\n'
                             % (repr(mod_name), name))
            continue
        # The driver imports; make sure we can actually connect.
        probe = cls('test_connect')
        conn = probe.get_connection()
        try:
            conn.connect()
            conn.close()
        except conn.module.Error:
            sys.stderr.write('Warning: could not open a '
                             'connection using %s. Skipping %s.\n'
                             % (repr(mod_name), name))
        else:
            suite.addTest(unittest.makeSuite(cls, 'test'))
    return suite
+
+
# Run only the suites for databases that are actually available.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
Property changes on: Products.Ape/trunk/lib/apelib/tests/testzope2sql.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/tests/zope2testbase.py
===================================================================
--- Products.Ape/trunk/lib/apelib/tests/zope2testbase.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/tests/zope2testbase.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,838 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Test of storing various kinds of objects
+
+$Id$
+"""
+
+from cStringIO import StringIO
+import time
+from types import ListType, TupleType
+
+from Acquisition import aq_base
+from Persistence import Persistent
+from ZODB import POSException
+from Persistence import PersistentMapping
+from OFS.Folder import Folder
+from OFS.ObjectManager import ObjectManager
+from OFS.SimpleItem import SimpleItem
+from OFS.Image import manage_addFile
+from OFS.DTMLMethod import DTMLMethod
+from AccessControl.User import User, UserFolder
+from Products.PythonScripts.PythonScript import PythonScript
+from Products.ZSQLMethods.SQL import manage_addZSQLMethod
+from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
+from DateTime import DateTime
+
+from apelib.core.interfaces import OIDConflictError
+
+
class TestFolder(Folder):
    # A Folder subclass with its own meta_type; used by
    # test_anyfolder_storage to verify that folderish objects of
    # otherwise unknown classes round-trip through storage.

    meta_type = 'Zope2FS Test Folder'

    def __init__(self, title):
        self.title = title
+
+
class TestObjectManager(ObjectManager):
    # An ObjectManager that is not a PropertyManager; used by
    # test_anyfolder_without_properties_storage to exercise the
    # optional-serializer path.

    meta_type = 'Zope2FS Test ObjectManager'

    def __init__(self, title):
        self.title = title
+
+
class TestFile(SimpleItem):
    # A simple file-like item of an otherwise unknown class; used by
    # test_anyfile_storage.

    meta_type = 'Zope2FS Test File'

    def __init__(self, content):
        self.content = content
+
+
class FixedSchemaTestFolder(Folder):
    # Folder with a fixed (class-level) property schema covering one
    # property of each basic type; used by test_store_fixed_schema.

    _properties = (
        {'id': 'mystring', 'type': 'string', 'mode': 'w'},
        {'id': 'myfloat', 'type': 'float', 'mode': 'w'},
        {'id': 'myint', 'type': 'int', 'mode': 'w'},
        {'id': 'mylong', 'type': 'long', 'mode': 'w'},
        {'id': 'mydate', 'type': 'date', 'mode': 'w'},
        {'id': 'mytext', 'type': 'text', 'mode': 'w'},
        {'id': 'myboolean0', 'type': 'boolean', 'mode': 'w'},
        {'id': 'myboolean1', 'type': 'boolean', 'mode': 'w'},
    )

    # Defaults; the test mutates each one and verifies the mutated
    # values round-trip.  Note mylong deliberately exceeds 32 bits.
    mystring = 'abc'
    myfloat = 3.14
    myint = -100
    mylong = 2L ** 40 + 10
    mydate = DateTime('2004/03/25')
    mytext = '987\n654\n321\n'
    myboolean0 = 0
    myboolean1 = 1
+
+
+class Zope2TestBase:
+
+ def test_load(self):
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ app.getId()
+ finally:
+ conn.close()
+
    def test_store(self):
        """A three-level folder hierarchy commits and reloads intact."""
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            f2 = Folder()
            f2.id = 'Christmas'
            f._setObject(f2.id, f2, set_owner=0)
            get_transaction().commit()

            f3 = Folder()
            f3.id = 'Eve'
            f2._setObject(f3.id, f3, set_owner=0)
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assert_(hasattr(app, 'Holidays'))
                self.assert_(hasattr(app.Holidays, 'Christmas'))
                self.assert_(hasattr(app.Holidays.Christmas, 'Eve'))
                # Verify the same OID is seen in both connections.
                self.assertEqual(app.Holidays._p_oid, f._p_oid)
            finally:
                conn2.close()

        finally:
            conn.close()
+
    def test_anyfolder_storage(self):
        """A folderish object of an arbitrary class round-trips.

        Also checks the object got its own database record (it can be
        turned into a ghost) and that class and title survive reload.
        """
        # Try to store a folderish object of an otherwise unknown class
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            f2 = TestFolder("New Year's Eve")
            f2.id = 'NewYear'
            f._setObject(f2.id, f2, set_owner=0)
            get_transaction().commit()

            # Verify the object is in its own database record
            self.assertNotEqual(f2._p_oid, None)
            f2._p_changed = None
            self.assert_(f2._p_changed is None)

            # Verify the ability to load it
            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                ff = app2.Holidays.NewYear
                self.assertEqual(ff.title, "New Year's Eve")
                self.assertEqual(ff.__class__, TestFolder)
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_anyfolder_without_properties_storage(self):
        """A folderish object without PropertyManager round-trips."""
        # Try to store a folderish object that does not implement
        # PropertyManager (tests OptionalSerializer)
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = TestObjectManager("* Holiday Calendar *")
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            # Verify the ability to load it
            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                ff = app2.Holidays
                self.assertEqual(ff.title, "* Holiday Calendar *")
                self.assertEqual(ff.__class__, TestObjectManager)
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_anyfile_storage(self):
        """A file-like object of an arbitrary class round-trips."""
        # Try to store a fileish object of an otherwise unknown class
        conn = self.db.open()
        try:
            content = 'insert wise expression here'

            app = conn.root()['Application']
            f = TestFile(content)
            f.id = 'testitem'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            # Verify the object is in its own database record
            self.assertNotEqual(f._p_oid, None)
            f._p_changed = None
            self.assert_(f._p_changed is None)

            # Verify the ability to load it
            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                ff = app2.testitem
                self.assertEqual(ff.content, content)
                self.assertEqual(ff.__class__, TestFile)
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_store_properties(self):
        """Ad-hoc properties (float, lines) survive commit and reload."""
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            f.title = 'Holiday Calendar'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            f._setProperty('pi', 3.14, 'float')
            f._setProperty('stuff', ['a', 'bc', 'd'], 'lines')
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assert_(hasattr(app, 'Holidays'))
                # Count the expected properties; exactly 3 must be found.
                got = 0
                for k, v in app.Holidays.propertyItems():
                    if k == 'title':
                        got += 1
                        self.assertEqual(v, 'Holiday Calendar')
                    elif k == 'pi':
                        got += 1
                        self.assertEqual(v, 3.14)
                    elif k == 'stuff':
                        got += 1
                        self.assertEqual(tuple(v), ('a', 'bc', 'd'))
                self.assertEqual(got, 3)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_store_selection_properties(self):
        """'selection'/'multiple selection' properties round-trip.

        Both the selected values and the select_variable reference
        must be restored.
        """
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            f.title = 'Holiday Calendar'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            # 'choices' is the select_variable for the other two.
            f._setProperty('choices', ['alpha', 'omega', 'delta'], 'lines')
            f._setProperty('greek', 'choices', 'multiple selection')
            f._setProperty('hebrew', 'choices', 'selection')
            f.greek = ['alpha', 'omega']
            f.hebrew = 'alpha'
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assert_(hasattr(app, 'Holidays'))
                got = 0
                for k, v in app.Holidays.propertyItems():
                    if k == 'greek':
                        got += 1
                        self.assertEqual(tuple(v), ('alpha', 'omega'))
                    if k == 'hebrew':
                        got += 1
                        self.assertEqual(v, 'alpha')
                self.assertEqual(got, 2)
                # Be sure the select_variable got restored.
                dict = app.Holidays.propdict()
                self.assertEqual(dict['greek']['select_variable'], 'choices')
                self.assertEqual(dict['hebrew']['select_variable'], 'choices')
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
+
    def test_store_property_types(self):
        """One property of each basic type is restored with correct type."""
        # Test that Ape restores properties to the correct types.
        from DateTime import DateTime
        now = DateTime()
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            f._setProperty('string1', 's', 'string')
            f._setProperty('float1', 3.14, 'float')
            f._setProperty('int1', 5, 'int')
            f._setProperty('long1', 2L**33, 'long')
            f._setProperty('date1', now, 'date')
            f._setProperty('date2', now, 'date_international')
            f._setProperty('text1', 'abc\ndef', 'text')
            f._setProperty('boolean0', 0, 'boolean')
            f._setProperty('boolean1', 1, 'boolean')
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                f2 = app2.Holidays
                self.assertEqual(f2.string1, 's')
                self.assertEqual(f2.float1, 3.14)
                self.assertEqual(f2.int1, 5)
                self.assertEqual(f2.long1, 2L**33)
                # Compare dates via ISO() to avoid sub-second drift.
                self.assertEqual(f2.date1.ISO(), now.ISO())
                self.assertEqual(f2.date2.ISO(), now.ISO())
                self.assertEqual(f2.text1, 'abc\ndef')
                self.assertEqual(f2.boolean0, 0)
                self.assertEqual(f2.boolean1, 1)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_store_fixed_schema(self):
        """Mutated values of a fixed class-level schema round-trip.

        Each property of FixedSchemaTestFolder is changed away from its
        class default, then checked against the expected mutated value
        in a second connection.
        """
        # Test that Ape restores properties of fixed schemas correctly.
        # (This is a pretty grueling test.)
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = FixedSchemaTestFolder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()
            f.mystring = f.mystring * 2
            f.myint = f.myint * 2
            f.mylong = f.mylong * 2
            f.myfloat = f.myfloat * 2
            f.mydate = f.mydate + 1
            f.mytext = f.mytext * 2
            f.myboolean0 = not f.myboolean0
            f.myboolean1 = not f.myboolean1
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                f2 = app2.Holidays
                self.assertEqual(f2.mystring, 'abcabc')
                self.assertEqual(f2.myint, -200)
                self.assertEqual(f2.mylong, 2L ** 41 + 20)
                self.assertEqual(f2.myfloat, 6.28)
                self.assertEqual(f2.mydate, DateTime('2004/03/26'))
                self.assertEqual(f2.mytext, '987\n654\n321\n987\n654\n321\n')
                self.assertEqual(f2.myboolean0, 1)
                self.assertEqual(f2.myboolean1, 0)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_store_user_folder(self):
        """A UserFolder and its users persist, including user deletion.

        The users live in f.data as unmanaged persistent objects
        (oid 'unmanaged'), so changes to them must still be committed.
        """
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            if hasattr(app, 'acl_users'):
                app._delObject('acl_users')
            f = UserFolder()
            f.id = 'acl_users'
            app._setObject(f.id, f, set_owner=0)
            f._doAddUser('ned', 'abcdefg', ('Serf', 'Knight', 'King'), ())
            f._doAddUser('joe', '123', ('Geek',), ())
            get_transaction().commit()

            # Be sure ZODB sees the unmanaged persistent objects
            u = f.data['ned']
            self.assertEqual(f.data._p_oid, 'unmanaged')
            self.assertEqual(u._p_oid, 'unmanaged')

            # Make some changes
            u.roles = ('Knight', 'King')
            u.domains = ('localhost',)
            del f.data['joe']  # Test user deletion
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                ff = app.acl_users
                self.assert_(aq_base(app.__allow_groups__) is aq_base(ff))
                self.assertEqual(len(ff.data), 1)
                user = ff.data['ned']
                self.assertEqual(user.name, 'ned')
                self.assertEqual(len(user.roles), 2)
                self.assert_('Knight' in user.roles)
                self.assert_('King' in user.roles)
                self.assertEqual(user.domains, ('localhost',))
                # A distinct copy, not the same in-memory object.
                self.assert_(user is not u)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_new_object_conflict_detection(self):
        """An object faked as 'new' must raise OIDConflictError on commit."""
        # Verify a new object won't overwrite existing objects by accident
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            app.some_attr = 'stuff'
            conn._set_serial(app, '\0' * 8)  # Pretend that it's new
            self.assertRaises(OIDConflictError, get_transaction().commit)
        finally:
            conn.close()
+
+
    def test_rename(self):
        """Delete-then-re-add under a new id behaves like manage_rename."""
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()

            # Do what manage_rename does, without the security checks
            ob = app.Holidays.aq_base
            app._delObject('Holidays')
            ob._setId('HolidayCalendar')
            app._setObject(ob.id, ob, set_owner=0)
            get_transaction().commit()

            self.assert_(hasattr(app, 'HolidayCalendar'))
            self.assert_(not hasattr(app, 'Holidays'))

            # The rename must also be visible from a fresh connection.
            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assert_(hasattr(app, 'HolidayCalendar'))
                self.assert_(not hasattr(app, 'Holidays'))
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_large_file(self):
        """A 256K File (stored by Zope in >64K chunks) round-trips."""
        # Verify that 256K file objects can be serialized/deserialized.
        # Zope splits files larger than 64K into chunks.
        data = 'data' * 65536
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            manage_addFile(app, 'file', StringIO(data))
            get_transaction().commit()

            self.assertEqual(str(app.file), data)
            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assertEqual(str(app.file), data)
                get_transaction().abort()
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_file_preserves_content_type(self):
        """An explicitly set content_type wins over type guessing."""
        # Verify that a file's content_type is preserved.
        # Note that there is some contention between content_type
        # guessing and the content_type property.
        data = (
            '\n'
            'This is not just text\n'
            'In a frivolous file test\n'
            'But a wise practice.\n'
        )
        ct = 'text/x-haiku'
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            manage_addFile(app, 'file', StringIO(data))
            app.file.content_type = ct
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                self.assertEqual(app.file.content_type, ct)
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_page_template(self):
        """A ZopePageTemplate renders after reload without being dirtied."""
        text = '<span tal:content="string:Hello">example</span>'
        expected = '<span>Hello</span>'
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            template = ZopePageTemplate('template', text)
            app._setObject(template.id, template, set_owner=0)
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                res = app.template()
                self.assertEqual(res.strip(), expected)
                # Rendering must not mark the template as changed.
                self.assert_(not app.template._p_changed)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_python_script(self, with_proxy_roles=0):
        """A PythonScript persists with its title and executes after reload.

        with_proxy_roles -- when true, a proxy role is set before the
        commit (see test_python_script_with_proxy_roles).
        """
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            script = PythonScript('script')
            script.write('##title=test script\nreturn "OK"')
            script._makeFunction()
            app._setObject(script.id, script, set_owner=0)
            if with_proxy_roles:
                # set a proxy role and verify nothing breaks
                script._proxy_roles = ('System Administrator',)
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                script = app.script
                self.assertEqual(script.title, 'test script')
                res = script()
                self.assertEqual(res, 'OK')
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_python_script_with_proxy_roles(self):
        """Regression test: serialize a PythonScript with proxy roles set."""
        # This once failed because PythonScripts check proxy roles
        # on calls to write().
        self.test_python_script(with_proxy_roles=1)
+
+
    def test_dtml_method(self):
        """A DTMLMethod persists with its title and renders after reload."""
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            m = DTMLMethod()
            m._setId('m')
            method_body = '''All <dtml-var expr="'OK'">.'''
            m.manage_edit(method_body, 'test method')
            app._setObject(m.getId(), m, set_owner=0)
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                m = app.m
                self.assertEqual(m.title, 'test method')
                res = m()
                self.assertEqual(res, 'All OK.')
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_zsql_method(self):
        """A ZSQLMethod keeps its title, argument list, and SQL source."""
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            template = 'SELECT <dtml-var foo> from <dtml-var bar>'
            manage_addZSQLMethod(app, 'm', 'test sql', 'none', 'foo bar',
                                 template)
            get_transaction().commit()

            conn2 = self.db.open()
            try:
                app = conn2.root()['Application']
                m = app.m
                self.assertEqual(m.title, 'test sql')
                self.assertEqual(m._arg._keys, ['foo', 'bar'])
                self.assertEqual(m.src, template)
            finally:
                conn2.close()

        finally:
            conn.close()
+
+
    def test_security_attributes(self):
        """Ownership, roles, local roles, proxy roles and permissions persist.

        Also verifies the tuple/list distinction on permission
        attributes (tuple = not acquired, list = acquired) and that
        changes written from a second connection are seen after sync().
        """
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            f = app.Holidays
            u = UserFolder()
            u.id = 'acl_users'
            f._setObject(u.id, u, set_owner=0)
            u._doAddUser('shane', 'abcdefg', ('Elder',), ())

            # Set every kind of security attribute directly.
            f._owner = (['Holidays', 'acl_users'], 'shane')
            f.__ac_roles__ = ['Elder', 'Manager', 'Missionary']
            f.__ac_local_roles__ = {'shane': ['Missionary']}
            f._proxy_roles = ['Manager']
            f._View_Permission = ('Owner', 'Elder')
            f._Add_Folders_Permission = ['Elder']

            get_transaction().commit()

            conn2 = self.db.open()
            try:
                # Verify that loading works
                app = conn2.root()['Application']
                f2 = app.Holidays
                user = f2.getOwner()
                self.assertEqual(user.getUserName(), 'shane')
                self.assert_('Elder' in user.getRoles())
                self.assertEqual(
                    list(f2.__ac_roles__), ['Elder', 'Manager', 'Missionary'])

                roles = {}
                for role in list(user.getRolesInContext(f2)):
                    if role != 'Authenticated' and role != 'Anonymous':
                        roles[role] = 1
                self.assertEqual(roles, {'Elder':1, 'Missionary':1})
                self.assertEqual(tuple(f2._proxy_roles), ('Manager',))

                self.assert_(isinstance(f2._View_Permission, TupleType),
                             "View permission should not be acquired")
                self.assert_(isinstance(f2._Add_Folders_Permission, ListType),
                             "Add Folders permission should be acquired")
                roles = {}
                for role in list(f2._View_Permission):
                    roles[role] = 1
                self.assertEqual(roles, {'Elder':1, 'Owner':1})

                # Write some changes to verify that changes work
                f2._owner = None
                del f2._proxy_roles
                f2.__ac_roles__ += ('Teacher',)
                get_transaction().commit()
            finally:
                conn2.close()

            # Make sure the changes are seen
            conn.sync()
            self.assert_(f.getOwner() is None, f.getOwner())
            self.assert_(not hasattr(f, '_proxy_roles'))
            self.assertEqual(
                list(f.__ac_roles__),
                ['Elder', 'Manager', 'Missionary', 'Teacher'])
        finally:
            conn.close()
+
+
    def test_mod_time(self):
        """After commit, _p_mtime is within 10 seconds of wall-clock time."""
        # Verify _p_mtime is within a reasonable range.
        conn = self.db.open()
        try:
            now = time.time()
            app = conn.root()['Application']
            app.title = 'Testing'
            get_transaction().commit()
            self.assert_(app._p_mtime > now - 10)
            self.assert_(app._p_mtime < now + 10)
        finally:
            conn.close()
+
+
    def test_write_with_ghosts(self):
        """A container commits even while a subobject is still a ghost."""
        # It should be possible to write a container even if one
        # or more of its subobjects are ghosts.
        conn = self.db.open()
        try:
            root = conn.root()
            root['foo'] = 1
            f = Folder()
            f.id = 'bar'
            root['bar'] = f
            get_transaction().commit()
            conn2 = self.db.open()
            try:
                root2 = conn2.root()
                root2['foo'] = 2
                # 'bar' has not been loaded in conn2, so it is a ghost.
                self.assertEqual(root2['bar']._p_changed, None)
                get_transaction().commit()
            finally:
                conn2.close()
        finally:
            conn.close()
+
+
    def test_btreefolder2(self):
        # BTreeFolder2 keeps its children in an internal BTree (an
        # unmanaged persistent subobject) rather than in the folder's
        # __dict__.  Verify Ape serializes and deserializes it.
        from Products.BTreeFolder2.BTreeFolder2 import BTreeFolder2
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = BTreeFolder2('Holidays')
            f.id = 'Holidays'
            app._setObject(f.id, f, set_owner=0)
            f2 = Folder()
            f2.id = 'Easter'
            app.Holidays._setObject(f2.id, f2)
            get_transaction().commit()
            # Verify serialize() found the unmanaged subobjects.
            self.assertEqual(app.Holidays._tree._p_oid, 'unmanaged')
            # Sanity check
            self.assertEqual(app.Holidays.objectCount(), 1)

            conn2 = self.db.open()
            try:
                app2 = conn2.root()['Application']
                # The child must be reachable through the tree, not the
                # folder's instance dictionary.
                self.assert_(app2.Holidays._tree.has_key('Easter'))
                self.assert_(not app2.Holidays.__dict__.has_key('Easter'))
                # Verify deserialize() found the unmanaged subobjects.
                self.assertEqual(app2.Holidays._tree._p_oid, 'unmanaged')
                app2.Holidays._delObject('Easter')
                get_transaction().commit()
            finally:
                conn2.close()

            # The deletion should be seen by both connections.
            conn.sync()
            self.assertEqual(app.Holidays.objectCount(), 0)

        finally:
            conn.close()
+
+
    def test_deactivate_unmanaged_persistent(self):
        # Some Zope code deactivates unmanaged persistent objects.
        # Verify that Ape can handle it.
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            f.stowaway = Folder()
            f.stowaway.id = 'stowaway'
            f.stowaway._prop = 'value1'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()
            # Ghostify the unmanaged subobject; its state should be
            # restored transparently on the next attribute access.
            f.stowaway._p_deactivate()
            self.assertEqual(f.stowaway._prop, 'value1')

            # Check aborting changes to an unmanaged object.
            f.stowaway._prop = 'value2'
            # Changing the stowaway marks its managed container changed.
            self.assertEqual(f._p_changed, 1)
            get_transaction().abort()
            self.assertEqual(f.stowaway._prop, 'value1')
            self.assertEqual(f._p_changed, 0)
        finally:
            conn.close()
+
+
    def test_upo_state_after_deactivate(self):
        # An unmanaged persistent object that gets deactivated
        # and reactivated should have the most recent state.
        self.db.setCacheSize(10)  # Don't flush the objects at commit
        conn = self.db.open()
        try:
            app = conn.root()['Application']
            f = Folder()
            f.id = 'Holidays'
            f.stowaway = Folder()
            f.stowaway.id = 'stowaway'
            f.stowaway._prop = 'value1'
            app._setObject(f.id, f, set_owner=0)
            get_transaction().commit()
            self.assertEqual(f._p_changed, 0)

            # Unmanaged subobjects get the special OID 'unmanaged'.
            self.assertEqual(f.stowaway._p_oid, 'unmanaged')
            f.stowaway._prop = 'value2'
            get_transaction().commit()
            self.assertEqual(f._p_changed, 0)

            # Ghostify the stowaway, then verify reactivation restores
            # the state saved at the second commit, not the first.
            del f.stowaway._p_changed
            self.assertEqual(f.stowaway._p_changed, None)
            self.assertEqual(f.stowaway._prop, 'value2')
        finally:
            conn.close()
+
+
+ def test_dcworkflow(self):
+ # Verifies storing a DCWorkflow instance doesn't blow up
+ try:
+ from Products.DCWorkflow.Default import createDefaultWorkflowRev2
+ except ImportError:
+ print
+ print 'Warning: Not running the DCWorkflow test'
+ return
+ conn = self.db.open()
+ try:
+ app = conn.root()['Application']
+ f = createDefaultWorkflowRev2('flow')
+ app._setObject(f.id, f)
+ get_transaction().commit()
+
+ conn2 = self.db.open()
+ try:
+ app2 = conn2.root()['Application']
+ self.assertEqual(app2.flow.states.private.getId(), 'private')
+ finally:
+ conn2.close()
+ finally:
+ conn.close()
Property changes on: Products.Ape/trunk/lib/apelib/tests/zope2testbase.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,17 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""apelib.zodb3 package
+
+$Id$
+"""
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/connection.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/connection.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/connection.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,617 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Extension of the ZODB Connection class
+
+$Id$
+"""
+
+import sys
+from time import time
+from types import StringType, TupleType, DictType
+from cStringIO import StringIO
+from cPickle import Unpickler, Pickler
+
+from Acquisition import aq_base
+from Persistence import Persistent
+from ZODB.Transaction import Transaction
+from ZODB.POSException \
+ import ConflictError, ReadConflictError, InvalidObjectReference, \
+ StorageError
+from ZODB.Connection import Connection
+from ZODB.ConflictResolution import ResolvedSerial
+from zLOG import LOG, ERROR
+
+from consts import HASH0
+from apelib.core.io import ObjectSystemIO
+from apelib.core.interfaces import IObjectDatabase, LoadError
+
+
+class ApeConnection (Connection):
+ """Mapper-driven Connection
+
+ Uses a mapper to serialize the state of objects before
+ pickling, and to deserialize objects based on the pickled
+ state.
+
+ The mapper might, for example, serialize all objects as
+ tabular records.
+ """
+ _osio = None
+ _scan_ctl = None
+ loaded_objects = False
+
+ __implements__ = (IObjectDatabase,
+ getattr(Connection, '__implements__', ()))
+
+
    def _setDB(self, odb):
        """Hook called when this connection is bound to a database.

        Besides the superclass bookkeeping, this drives background
        scanning: if the database has a pool scan control, register the
        OIDs this connection caches and, when due, trigger a scan of
        the underlying storage for third-party changes.
        """
        Connection._setDB(self, odb)
        pool_ctl = odb.pool_scan_ctl
        if pool_ctl is not None:
            ctl = self._scan_ctl
            if ctl is None:
                self._scan_ctl = ctl = pool_ctl.new_connection()
            if ctl.elapsed():
                # Scan inside a transaction.
                get_transaction().register(self)
                # Let the scanner know which OIDs matter.
                ctl.set_oids(self._cache.cache_data.keys())
                # If it's time, scan on behalf of the whole pool.
                if pool_ctl.elapsed():
                    pool_ctl.scan()
                # If there were any invalidations, process them now.
                if self._invalidated:
                    self._flush_invalidations()
+
+
    def _prepare_root(self):
        """Create the root object if the storage doesn't have one yet.

        The root OID comes from the configured OID generator rather
        than being hardwired.  The root is committed through the full
        two-phase commit sequence so the storage sees a real
        transaction.
        """
        osio = self._get_osio()
        oid = osio.conf.oid_gen.root_oid
        try:
            self[oid]
        except (KeyError, LoadError):
            # Create the root object.
            from Persistence import PersistentMapping
            root = PersistentMapping()
            root._p_jar = self
            root._p_changed = 1
            root._p_oid = oid
            t = Transaction()
            t.note('Initial database creation')
            self.tpc_begin(t)
            self.commit(root, t)
            self.tpc_vote(t)
            self.tpc_finish(t)
+
+ def root(self):
+ osio = self._get_osio()
+ oid = osio.conf.oid_gen.root_oid
+ return self[oid]
+
+ def _get_osio(self):
+ """Returns an ObjectSystemIO.
+ """
+ osio = self._osio
+ if osio is None:
+ conf = self._db._conf_resource.access(self)
+ osio = ObjectSystemIO(conf, self)
+ self._osio = osio
+ return osio
+
+
    def close(self):
        """Close the connection and release the mapper configuration.

        The configuration resource is released even when the superclass
        close() raises, but only if it was actually acquired (i.e. an
        ObjectSystemIO exists).
        """
        db = self._db
        try:
            Connection.close(self)
        finally:
            if db is not None and self._osio is not None:
                self._osio = None
                db._conf_resource.release(self)
+
+
+ def __getitem__(self, oid, tt=type(())):
+ obj = self._cache.get(oid, None)
+ if obj is not None:
+ return obj
+
+ __traceback_info__ = (oid)
+ self.before_load()
+ p, serial = self._storage.load(oid, self._version)
+ __traceback_info__ = (oid, p)
+ file=StringIO(p)
+ unpickler=Unpickler(file)
+ # unpickler.persistent_load=self._persistent_load
+
+ try:
+ classification = unpickler.load()
+ except:
+ raise ("Could not load oid %s. Pickled data in traceback info may "
+ "contain clues." % (oid))
+ osio = self._get_osio()
+ obj = osio.new_instance(oid, classification)
+ assert obj is not None
+
+ obj._p_oid=oid
+ obj._p_jar=self
+ obj._p_changed=None
+ self._set_serial(obj, serial)
+
+ self._cache[oid] = obj
+
+ if oid == osio.conf.oid_gen.root_oid:
+ self._root_=obj # keep a ref
+ return obj
+
+
    def _persistent_load(self, oid, classification=None):
        """Return the object referenced by *oid*, creating a ghost if possible.

        When classification hints are provided, a ghost instance can be
        built without touching the storage; otherwise fall back to a
        full load via __getitem__.
        """
        __traceback_info__ = oid

        obj = self._cache.get(oid, None)
        if obj is not None:
            return obj

        if classification:
            osio = self._get_osio()
            obj = osio.new_instance(oid, classification)
            if obj is not None:
                obj._p_oid = oid
                obj._p_jar = self
                obj._p_changed = None
                self._cache[oid] = obj
                return obj

        # We don't have enough info for fast loading.  Load the whole object.
        return self[oid]
+
+
+ def _may_begin(self, transaction):
+ if hasattr(self, '_begun') and not self._begun:
+ self._storage.tpc_begin(transaction)
+ self._begun = 1
+
+
    def commit(self, obj, transaction):
        """Store *obj* (and any newly referenced objects) in the storage.

        Called by the transaction machinery during tpc.  Unlike the
        stock ZODB Connection.commit(), the object state is produced by
        the configured serializers rather than by pickling __dict__,
        and external references are discovered by the serialization
        event instead of a persistent_id hook.
        """
        if obj is self:
            self._may_begin(transaction)
            # We registered ourself.  Execute a commit action, if any.
            if self._Connection__onCommitActions is not None:
                method_name, args, kw = \
                             self._Connection__onCommitActions.pop(0)
                apply(getattr(self, method_name), (transaction,) + args, kw)
            return
        oid = obj._p_oid
        assert oid != 'unmanaged', repr(obj)
        #invalid=self._invalidated.get
        invalid = self._invalid

        modified = getattr(self, '_modified', None)
        if modified is None:
            modified = self._invalidating

        if oid is None or obj._p_jar is not self:
            # new object
            oid = self.new_oid()
            obj._p_jar = self
            obj._p_oid = oid
            self._creating.append(oid)

        elif obj._p_changed:
            # Changed object: refuse to overwrite concurrently
            # invalidated state unless it can resolve conflicts.
            if (
                (invalid(oid) and not hasattr(obj, '_p_resolveConflict'))
                or
                invalid(None)
                ):
                raise ConflictError(object=obj)
            modified.append(oid)

        else:
            # Nothing to do
            return

        self._may_begin(transaction)

        # Depth-first store of obj plus any new external references
        # discovered during serialization.
        stack = [obj]

        file = StringIO()
        seek = file.seek
        pickler = Pickler(file, 1)
        # SDH: external references are computed in a different way.
        # pickler.persistent_id=new_persistent_id(self, stack.append)
        dbstore = self._storage.store
        file = file.getvalue
        cache = self._cache
        get = cache.get
        dump = pickler.dump
        clear_memo = pickler.clear_memo

        version = self._version

        while stack:
            obj = stack[-1]
            del stack[-1]
            oid = obj._p_oid
            assert oid != 'unmanaged', repr(obj)
            serial = self._get_serial(obj)
            if serial == HASH0:
                # new object
                self._creating.append(oid)
            else:
                #XXX We should never get here
                # SDH: Actually it looks like we should, but only
                # for the first object on the stack.
                if (
                    (invalid(oid) and
                     not hasattr(obj, '_p_resolveConflict'))
                    or
                    invalid(None)
                    ):
                    raise ConflictError(object=obj)
                modified.append(oid)

            # SDH: hook in the serializer.
            # state=obj.__getstate__()
            osio = self._get_osio()
            event, classification, state = osio.serialize(oid, obj)
            ext_refs = event.external
            if ext_refs:
                # Adopt newly discovered referenced objects into this
                # connection, verifying jar and OID consistency.
                for (ext_oid, ext_ref) in ext_refs:
                    assert ext_oid
                    assert ext_ref is not None
                    if self._cache.get(ext_oid, None) is not ext_ref:
                        # New object or a bad reference
                        if ext_ref._p_jar is not None:
                            if ext_ref._p_jar is not self:
                                raise InvalidObjectReference, (
                                    "Can't refer from %s in %s to %s in %s"
                                    % (repr(obj), repr(self), repr(ext_ref),
                                       repr(ext_ref._p_jar)))
                        else:
                            ext_ref._p_jar = self
                        if ext_ref._p_oid:
                            if ext_ref._p_oid != ext_oid:
                                raise StorageError('Conflicting OIDs')
                        else:
                            ext_ref._p_oid = ext_oid
                        stack.append(ext_ref)

            if event.upos:
                self._handle_unmanaged(obj, event.upos)

            # Two pickles per record: classification, then state.
            seek(0)
            clear_memo()
            dump(classification)
            dump(state)
            p = file(1)
            s = dbstore(oid, serial, p, version, transaction)
            self._store_count = self._store_count + 1

            # Put the object in the cache before handling the
            # response, just in case the response contains the
            # serial number for a newly created object
            try: cache[oid] = obj
            except:
                if aq_base(obj) is not obj:
                    # Yuck, someone tried to store a wrapper.  Try to
                    # cache it unwrapped.
                    cache[oid] = aq_base(obj)
                else:
                    raise

            self._handle_serial(s, oid)
+
+
+ def setstate(self, obj):
+ oid=obj._p_oid
+
+ self.before_load()
+ try:
+ p, serial = self._storage.load(oid, self._version)
+ self._load_count = self._load_count + 1
+
+ # XXX this is quite conservative!
+ # We need, however, to avoid reading data from a transaction
+ # that committed after the current "session" started, as
+ # that might lead to mixing of cached data from earlier
+ # transactions and new inconsistent data.
+ #
+ # Note that we (carefully) wait until after we call the
+ # storage to make sure that we don't miss an invaildation
+ # notifications between the time we check and the time we
+ # read.
+ #invalid = self._invalidated.get
+ invalid = self._invalid
+ if invalid(oid) or invalid(None):
+ if not hasattr(obj.__class__, '_p_independent'):
+ get_transaction().register(self)
+ raise ReadConflictError(object=obj)
+ invalid=1
+ else:
+ invalid=0
+
+ file=StringIO(p)
+ unpickler=Unpickler(file)
+ # SDH: external references are reassembled elsewhere.
+ # unpickler.persistent_load=self._persistent_load
+ classification = unpickler.load()
+ state = unpickler.load()
+
+ # SDH: Let the object mapper do the state setting.
+ # if hasattr(object, '__setstate__'):
+ # object.__setstate__(state)
+ # else:
+ # d=object.__dict__
+ # for k,v in state.items(): d[k]=v
+ osio = self._get_osio()
+ event = osio.deserialize(oid, obj, classification, state)
+
+ if event.upos:
+ self._handle_unmanaged(obj, event.upos)
+
+ self._set_serial(obj, serial)
+
+ if invalid:
+ if obj._p_independent():
+ try: del self._invalidated[oid]
+ except KeyError: pass
+ else:
+ get_transaction().register(self)
+ raise ConflictError(object=obj)
+
+ except ConflictError:
+ raise
+ except:
+ LOG('ZODB',ERROR, "Couldn't load state for %s" % `oid`,
+ error=sys.exc_info())
+ raise
+
+
+ def register(self, obj):
+ """Register an object with the appropriate transaction manager.
+ """
+ assert obj._p_jar is self
+ if obj._p_oid is not None:
+ get_transaction().register(obj)
+ # else someone is trying to trick ZODB into registering an
+ # object with no OID. OFS.Image.File._read_data() does this.
+ # Since ApeConnection really needs meaningful OIDs, just ignore
+ # the attempt.
+
+
+ def __repr__(self):
+ if self._version:
+ ver = ' (in version %s)' % `self._version`
+ else:
+ ver = ''
+ return '<%s at %08x%s>' % (self.__class__.__name__, id(self), ver)
+
+
    def _handle_unmanaged(self, obj, unmanaged):
        # Add an event handler to unmanaged subobjects.
        # The event handler calls self.register() when it changes.
        # Each unmanaged persistent object (UPO) gets the special OID
        # 'unmanaged' and an UnmanagedJar that marks the managed
        # container *obj* as changed whenever the UPO changes.
        for o in unmanaged:
            if hasattr(o, '_p_oid'):  # Looks like a persistent object
                if o._p_jar is None:
                    o._p_oid = 'unmanaged'
                    o._p_jar = UnmanagedJar(self, obj._p_oid, o)
                else:
                    assert o._p_oid == 'unmanaged'
                    if o._p_changed is not None:
                        # Refresh the snapshot used for reactivation.
                        o._p_jar.save_state(o)
+
+
+ # IObjectDatabase implementation
+
+ get = _persistent_load
+
+ def identify(self, obj):
+ try:
+ oid = obj._p_oid
+ except AttributeError:
+ raise TypeError("%s does not subclass Persistent" % repr(obj))
+ if oid is None:
+ return None
+ if obj._p_jar is not self:
+ raise InvalidObjectReference, (
+ "Can't refer to %s, located in %s, from %s"
+ % (repr(obj), repr(obj._p_jar), repr(self)))
+ return oid
+
    def new_oid(self):
        """Allocate a new OID from the storage (IObjectDatabase API)."""
        return self._storage.new_oid()
+
+
    def get_class(self, module, name):
        """Import and return a class via the database's class factory."""
        return self._db._classFactory(self, module, name)
+
+
+ def check_serials(self):
+ """Verifies that all cached objects are in sync with the data.
+
+ This is useful for finding gateways that generate inconsistent
+ hashes.
+ """
+ for oid, ob in self._cache.items():
+ if ob._p_changed is not None:
+ self.before_load()
+ p, serial = self._storage.load(oid, self._version)
+ if serial != self._get_serial(ob):
+ raise StorageError(
+ "Inconsistent serial for oid %s" % repr(oid))
+
+ def before_load(self):
+ """Add self to the transaction before loading objects.
+
+ This causes databases to be notified when the transaction
+ completes.
+ """
+ if self._storage is None:
+ text = ("Shouldn't load state for %s "
+ "when the connection is closed" % `oid`)
+ LOG('ZODB', ERROR, text)
+ raise RuntimeError(text)
+ if not self.loaded_objects:
+ self.loaded_objects = True
+ get_transaction().register(self)
+
    def tpc_abort(self, transaction):
        # Reset the flag so the next load re-registers this connection
        # with the (next) transaction.
        self.loaded_objects = False
        Connection.tpc_abort(self, transaction)
+
    def tpc_finish(self, transaction):
        # Reset the flag so the next load re-registers this connection
        # with the (next) transaction.
        self.loaded_objects = False
        Connection.tpc_finish(self, transaction)
+
    def exportFile(self, oid, file=None):
        # ZEXP export is not supported by mapper-driven connections.
        raise NotImplementedError('ZEXP Export not implemented')
+
    def importFile(self, file, clue='', customImporters=None):
        # ZEXP import is not supported by mapper-driven connections.
        raise NotImplementedError('ZEXP Import not implemented')
+
+
+ # A note on serials: Serials need to be stored independently of
+ # objects because the current Persistent base class uses _p_serial
+ # to derive _p_mtime. Applications like Zope use _p_mtime, but
+ # the _p_serial for Ape isn't always a date, so Ape can't use
+ # _p_serial to store serials. Instead, ApeConnection puts them in
+ # a _serials dictionary.
+
+ _serials = None
+ serial_cleanup_threshold = 1000
+
+ def _get_serial(self, ob):
+ oid = ob._p_oid
+ if oid is None or self._cache.get(oid, None) is not ob:
+ return HASH0
+ serials = self._serials
+ if serials is None:
+ return HASH0
+ return serials.get(oid, HASH0)
+
+ def _set_serial(self, ob, s):
+ oid = ob._p_oid
+ assert oid is not None
+ if s is None:
+ s = HASH0
+ serials = self._serials
+ if serials is None:
+ serials = {}
+ self._serials = serials
+ if not serials.has_key(oid):
+ # When the number of recorded serials exceeds the number of
+ # cache entries by serial_cleanup_threshold, prune the serials
+ # dictionary.
+ if (len(serials) >= len(self._cache) +
+ self.serial_cleanup_threshold):
+ # clean up
+ cache_get = self._cache.get
+ for oid in serials.keys():
+ ob = cache_get(oid, None)
+ if ob is None or ob._p_changed is None:
+ del serials[oid]
+ serials[oid] = s
+
    def _handle_serial(self, store_return, oid=None, change=1):
        """Handle the returns from store() and tpc_vote() calls."""

        # These calls can return different types depending on whether
        # ZEO is used.  ZEO uses asynchronous returns that may be
        # returned in batches by the ClientStorage.  ZEO1 can also
        # return an exception object and expect that the Connection
        # will raise the exception.

        # When commit_sub() executes a store, there is no need to
        # update the _p_changed flag, because the subtransaction
        # tpc_vote() calls already did this.  The change=1 argument
        # exists to allow commit_sub() to avoid setting the flag
        # again.
        if not store_return:
            return
        if isinstance(store_return, StringType):
            # A single serial for a single store.
            assert oid is not None
            serial = store_return
            obj = self._cache.get(oid, None)
            if obj is None:
                return
            if serial == ResolvedSerial:
                del obj._p_changed
            else:
                if change:
                    obj._p_changed = 0
                #obj._p_serial = serial
                self._set_serial(obj, serial)
        else:
            # A batch of (oid, serial) pairs; a non-string serial is an
            # exception to propagate.
            for oid, serial in store_return:
                if not isinstance(serial, StringType):
                    raise serial
                obj = self._cache.get(oid, None)
                if obj is None:
                    continue
                if serial == ResolvedSerial:
                    del obj._p_changed
                else:
                    if change:
                        obj._p_changed = 0
                    #obj._p_serial = serial
                    self._set_serial(obj, serial)
+
+
+
class UnmanagedJar:
    """Special jar for unmanaged persistent objects.

    There is one such jar for each unmanaged persistent object.  All
    it does is notify the managed persistent object of changes.

    Some applications ghostify unmanaged persistent objects.  To
    restore the state after ghostification, this jar keeps a reference
    to the state and restores it in setstate().  Note that when the
    managed persistent object that holds the unmanaged object gets
    ghosted, it usually removes the last reference to the unmanaged
    object, which is then deallocated.
    """

    def __init__(self, real_jar, real_oid, obj):
        # real_jar/real_oid identify the managed container of obj.
        self.real_jar = real_jar
        self.real_oid = real_oid
        self.save_state(obj)

    def save_state(self, obj):
        """Snapshot obj's state so it can be restored after ghostification."""
        s = obj.__getstate__()
        if isinstance(s, DictType):
            # Copy so later in-place mutation can't corrupt the snapshot.
            s = s.copy()
        self.state = s

    def register(self, obj):
        """Propagate a change notification to the managed container."""
        o = self.real_jar[self.real_oid]
        if o._p_changed is None:
            # The application held on to this UPO even after its
            # container was ghosted.  The container needs to be
            # reactivated, but reactivation would create a new UPO in
            # place of the UPO held by this jar.  The application
            # would continue to refer to this old UPO.  Don't let the
            # application continue to change this abandoned object,
            # since all changes will be lost.
            raise StorageError(
                'Tried to change an unmanaged persistent object '
                'when the containing persistent object is a ghost')
        o._p_changed = 1

    def setstate(self, obj):
        """Restore obj from the saved snapshot (reactivation)."""
        obj.__setstate__(self.state)

    def modifiedInVersion(self, oid):
        # XXX PersistentExtra wants this
        return ''
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/connection.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/consts.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/consts.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/consts.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,20 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Constants for this package.
+
+$Id$
+"""
+
HASH0 = '\0' * 8            # null serial; used as the "no hash recorded" marker
HASH1 = '\0' * 7 + '\001'   # smallest non-null serial
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/consts.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/db.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/db.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/db.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,136 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Extension of the ZODB DB class
+
+$Id$
+"""
+
+import cPickle
+import cStringIO
+
+from ZODB.DB import DB, Transaction, allocate_lock
+from apelib.core.interfaces import ConfigurationError
+
+from connection import ApeConnection
+from storage import ApeStorage
+from resource import StaticResource
+from interfaces import IResourceAccess
+
+
def call_conf_factory(factory, kw):
    """Returns (conf, conns) given the name of a factory and arguments.

    *factory* is a dotted string '<module>.<name>'; the named callable
    is imported and invoked with **kw.
    """
    parts = factory.rsplit('.', 1)
    if len(parts) != 2:
        raise ConfigurationError(
            'factory must be a string containing <module>.<name>')
    module, name = parts
    m = __import__(module, {}, {}, (name,))
    f = getattr(m, name)
    return f(**kw)
+
+
class ApeDB (DB):
    """Mapper-driven Database
    """

    klass = ApeConnection

    # Class-level default so ApeConnection._setDB() can always read
    # db.pool_scan_ctl, even when background scanning is disabled.
    pool_scan_ctl = None

    # SDH: some extra args.
    def __init__(self, storage,
                 conf_resource=None,
                 factory=None,
                 scan_interval=10,
                 pool_size=7,
                 cache_size=400,
                 cache_deactivate_after=60,
                 version_pool_size=3,
                 version_cache_size=100,
                 version_cache_deactivate_after=10,
                 **kw
                 ):
        """Create an object database.

        The mapper configuration comes from one of three places:
        an explicit conf_resource (IResourceAccess), a factory (dotted
        name of a callable returning (conf, connections), called with
        **kw), or an ApeStorage that carries its own configuration.
        """
        if conf_resource is None:
            if factory is not None:
                # Use a configuration factory
                conf, connections = call_conf_factory(factory, kw)
                conf_resource = StaticResource(conf)
            else:
                if kw:
                    raise ConfigurationError('Extra keyword args: %s' % kw)
                if isinstance(storage, ApeStorage):
                    # Use the configuration from the storage
                    conf_resource = storage.conf_resource
                else:
                    raise ConfigurationError(
                        'No configuration or factory specified')
        else:
            # conf_resource was specified
            if kw:
                raise ConfigurationError('Extra keyword args: %s' % kw)
            assert IResourceAccess.isImplementedBy(conf_resource)
            assert factory is None

        # Allocate locks:
        l = allocate_lock()
        self._a = l.acquire
        self._r = l.release

        # Setup connection pools and cache info
        self._pools = {}, []
        self._temps = []
        self._pool_size = pool_size
        self._cache_size = cache_size
        self._cache_deactivate_after = cache_deactivate_after
        self._version_pool_size = version_pool_size
        self._version_cache_size = version_cache_size
        self._version_cache_deactivate_after = version_cache_deactivate_after

        self._miv_cache = {}

        # Setup storage
        self._storage = storage
        storage.registerDB(self, None)
        if not hasattr(storage, 'tpc_vote'):
            storage.tpc_vote = lambda *args: None

        self._conf_resource = conf_resource
        scan_interval = int(scan_interval)
        if scan_interval > 0:
            from scanner import PoolScanControl, Scanner
            pool_ctl = PoolScanControl(
                storage, db=self, scan_interval=scan_interval)
            self.pool_scan_ctl = pool_ctl
            scanner = Scanner()
            storage.scanner = scanner
            scanner.storage = storage
        else:
            # Bug fix: ApeConnection._setDB() reads db.pool_scan_ctl
            # unconditionally, but this branch previously set only
            # self._scan_ctl, leaving pool_scan_ctl undefined (an
            # AttributeError on every connection open) whenever
            # scanning was disabled.  Set it explicitly to None.
            self.pool_scan_ctl = None
            self._scan_ctl = None

        # Pass through methods:
        for m in ('history',
                  'supportsUndo', 'supportsVersions', 'undoLog',
                  'versionEmpty', 'versions'):
            setattr(self, m, getattr(storage, m))

        if hasattr(storage, 'undoInfo'):
            self.undoInfo = storage.undoInfo

        # Create the root object if it doesn't exist
        c = self.open()
        try:
            c._prepare_root()
        finally:
            c.close()
+
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/db.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/interfaces.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/interfaces.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/interfaces.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,35 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Interfaces for apelib.zodb3.
+
+$Id$
+"""
+
+from Interface import Interface
+
+
class IResourceAccess (Interface):
    """Provides access to a resource that may need periodic updates.

    Used for the mapper configuration: ApeDB holds an IResourceAccess
    and each ApeConnection acquires the configuration through access()
    and gives it back through release().
    """

    def access(consumer):
        """Returns the resource.
        """

    def release(consumer):
        """Indicates the given consumer is finished with the resource.

        The implementation may take an opportunity to update the resource.
        """
+
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/interfaces.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/notes.txt
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/notes.txt (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/notes.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,206 @@
+
+--------------------------------------------------
+Notes for moving the Connection class to ZODB 3.4+
+--------------------------------------------------
+
+At the heart of Connection is an ObjectSystemIO object. (See
+apelib.core.io.) ObjectSystemIO and all of the objects it calls upon
+are intended to be potentially reusable with other persistence
+frameworks. Connection is not.
+
+- _setDB(): this is purely for background scanning, but I want to make all
+scanning external, so this override should go away.
+
+- _prepare_root(): this creates the root PersistentMapping if it doesn't
+exist. It does it in a more modular way than standard ZODB. Keep it.
+
+- root(): Ape allows the configured OID generator to choose the root
+OID, rather than assume the root is ('\0' * 8). However, note that
+both _prepare_root() and root() have the same logic for determining
+the root OID. If that logic is moved into a separate method, perhaps
+root_oid(), _prepare_root() and root() will no longer have
+dependencies on Ape and they can move into ZODB.
+
+- _get_osio(): Returns the ObjectSystemIO for use by other methods.
+The resource stuff is in there to allow the object mapping to change
+without restarting the process. This was an early idea that never
+took off, and today I doubt it would be safe (or even useful) to
+change mappings mid-process. We should rip out the resource
+indirection.
+
+- close(): if you rip out the resource indirection, Ape doesn't need to
+override close().
+
+- __getitem__(): the only thing different from ZODB is the way we load
+the pickle. Note that pickles in Ape have a slightly different format
+from standard ZODB. ZODB pickles consist of two pickles: a tuple
+about the object's class, then the object's state. Ape also stores
+two pickles, but the first pickle is a dictionary containing
+classification information and the second pickle is the output of the
+object's serializer. ZODB pickles normally contain inter-object
+references, but Ape pickles do not; instead, the serializer converts
+inter-object references into normal data that's easy to pickle and
+unpickle. (The line that sets unpickler.persistent_load is commented
+out because Ape doesn't expect the pickle to have persistent
+references.)
+
+Note that in theory, the connection doesn't really need to pickle or
+unpickle. Everything the connection pickles is immediately unpickled
+by the storage. Also, when loading objects, the storage pickles
+object data just before the connection unpickles it. Ape does this
+for two reasons: to conform with the existing storage interface, which
+expects pickles, and for ZEO compatibility. ZEO is important, so Ape
+should continue to pickle, but eventually I'd like a mode that doesn't
+bother pickling when ZEO is not involved. Ape might get a little
+speed boost.
+
+- _persistent_load(): this has an optimization similar to the one in
+standard ZODB. In standard ZODB, the OID passed to _persistent_load
+can be either a string or a tuple; if it's a tuple, ZODB can usually
+infer the object's class without actually loading the object, saving a
+lot of time. In Ape, the OID is always a string, but sometimes
+serializers pass a second argument to the get() method (which is an
+alias for _persistent_load.) get() is documented in
+apelib.core.interfaces.IObjectDatabase. We should rename
+_persistent_load() to get().
+
+- _may_begin(): an artifact for retaining compatibility with older
+ZODBs. Get rid of it.
+
+- commit(): ZODB needs a lot more hooks in commit(). I hope the
+latest ZODB has more hooks. Most of commit() is copied.
+
+ - assert oid != 'unmanaged': a safety measure that's worth
+ discussing. Normal ZODB classifies objects stored in the database
+ in two ways: either the object's class subclasses Persistent,
+ meaning the object will be stored in its own database record, or the
+ object's class does not subclass Persistent, meaning the object will
+ be stored inside some other object's database record. Ape
+ introduces a third classification: objects that subclass Persistent,
+ but whose mapper decides to store the object inside another record
+ anyway. Ape calls these objects unmanaged persistent objects
+ (UPOs), and their _p_oid is always "unmanaged". Also, each of them
+ has its own private _p_jar. Therefore, this commit() method
+ should never see UPOs, but badly behaved application code can mix
+ things up, and we don't want to choke.
+
+ - The line that sets pickler.persistent_id is commented out because,
+ again, Ape doesn't store inter-object references this way.
+ Serializers handle inter-object references.
+
+ (skip lots of copied code)
+
+ - Ape replaces the __getstate__ call with a call to
+ ObjectSystemIO.serialize(). Then, for each external reference
+ generated by the serializers, it tries to find that object in
+ self._cache. If it's not in the cache, commit() assigns an OID to
+ the object and queues the object for commit. Also, it verifies the
+ reference doesn't conflict with some other object already in the
+ cache, and it ensures the object isn't in another _p_jar. (The last
+ part of the logic may change when the new database mount machinery
+ lands.)
+
+ - If the serialization work generated UPOs, commit() calls
+ _handle_unmanaged. _handle_unmanaged sets the _p_jar for each UPO.
+ The special _p_jar enables Ape to store the containing object when
+ one of its contained UPOs changes.
+
+ (more copied code)
+
+ - The call to _handle_serial is important. See below.
+
+- setstate(): Also needs more hooks. Mostly copied, except the
+middle. As before, ZODB pickles have a class tuple followed by a
+class dictionary, while Ape pickles have a classification dictionary
+followed by an object state to be fed to the serializer.
+ObjectSystemIO encapsulates most of the work. Don't forget to call
+_handle_unmanaged if the serializers saw UPOs while they were loading
+the objects.
+
+- register(): Unlike standard Connection, Ape can't deal with an attempt
+to store an object with no _p_oid. Ape needs to know the object's
+lineage. For example, if a Folder is contained in a UserFolder, a
+mapper can choose to store the Folder differently than if it's
+contained in an Application object. Therefore, Ape ignores attempts
+to store an object with no _p_oid. In the future, it probably should
+raise an exception, but some Zope code will need to be changed first.
+
+- get(), identify(), and new_oid(): implementation of IObjectSystem.
+
+- check_serials(): for debugging. It scans the cache for objects whose
+state is not 100% consistent with the database. I don't think
+anything calls it.
+
+- before_load(): registers self in the transaction, ensuring that this
+Connection participates in the transaction even if no objects change.
+Normal ZODB registers objects in the transaction only when they
+change. __getitem__(), setstate(), and check_serials call this.
+before_load() was added only recently. Until it was added, Ape left
+RDBMS connections open without committing or aborting the transaction.
+I was initially concerned this solution would cause a performance hit,
+but the extra transactions made no apparent difference to the old,
+slow computer that runs my web site. :-)
+
+- tpc_abort(), tpc_finish(): hooks to reset the loaded_objects flag.
+The loaded_objects flag only exists because there isn't a way to ask
+the transaction whether some object (self in particular) is already
+registered in the transaction, and registering self more than once
+consumes resources. So Connection remembers for itself whether it has
+registered itself. This may actually be a good pattern to keep, but I
+think the attribute could have a better name.
+
+- exportFile(), importFile(): gotta get around to this. :-)
+
+- _get_serial, _set_serial, _handle_serial: ZODB has an ugly
+optimization that's really hard for Ape to deal with. _p_serial
+attributes are required to have exactly 8 characters, and _p_mtime is
+derived from _p_serial. ZODB uses _p_serial for three separate pieces
+of information:
+
+ - if _p_serial is '\0' * 8, the object is new. Otherwise the object
+ has been stored before.
+
+ - _p_serial is used for conflict detection. If the connection wants
+ to replace an object in the database, the connection is required to
+ provide the _p_serial of the object's previous state. If the old
+ _p_serial doesn't match, apparently some other connection modified
+ the object concurrently and the conflict needs to be resolved or the
+ transaction aborted.
+
+ - _p_serial is used for storing the time the object was last modified.
+
+The first and second meanings fit together, but Ape distinguishes the
+first two meanings from the third. Ape frequently can't know exactly
+when an object was last modified, and even when it does, that
+information isn't always reliable. So Ape instead uses a hash of the
+object's state for conflict detection. Ape's storage layer squeezes
+the state hash into 64 bits and calls that the serial.
+
+Ape also needs to provide _p_mtime for applications that depend on it.
+The only way to provide _p_mtime is to set _p_serial. So Ape fakes
+_p_mtime by setting _p_serial. Then it stores all of the real serials
+in the connection's _serials attribute.
+
+The _p_serial mess would disappear if ZODB separated _p_serial and
+_p_mtime into independent fields and allowed _p_serial to contain an
+arbitrary object.
+
+- UnmanagedJar class: every UPO has one of these for its _p_jar.
+real_jar and real_oid are the connection and oid of the Persistent
+object that contains the UPO. The register() method is the important
+method; it causes the container of the UPO to be stored when the UPO
+changes.
+
+Also, UnmanagedJar preserves the object's state. This class would not
+have to bother with saving the object's state if it weren't for the
+fact that some applications try to release memory by setting
+obj._p_changed = None, telling the Persistent base class to clear the
+object's __dict__. If the object is a UPO, releasing its state is a
+problem: if the containing object is changed when the UPO state is
+gone, the containing object's state will be incomplete (since its
+state is supposed to include the state of the UPO.) Therefore,
+UnmanagedJar preserves the UPO's state in itself. It would be much
+better, though, to be able to really prevent UPOs from being ghosted.
+The current C code assumes that objects with a _p_jar and _p_oid are
+always eligible for ghosting.
+
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/notes.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/resource.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/resource.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/resource.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,35 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Resource access
+
+$Id$
+"""
+
+from interfaces import IResourceAccess
+
+
class StaticResource:
    """Simple, static resource.

    An IResourceAccess implementation that always hands out the same
    object and needs no updating on release.
    """

    __implements__ = IResourceAccess

    def __init__(self, r):
        # r: the resource object handed out on every access() call.
        self.r = r

    def access(self, consumer):
        # The same resource is returned regardless of the consumer.
        return self.r

    def release(self, consumer):
        # A static resource never needs updating.
        pass
+
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/resource.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/scanner.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/scanner.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/scanner.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,286 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Cache scanner.
+
+Keeps a cache up to date by scanning for changes.
+
+$Id$
+"""
+
+from thread import allocate_lock
+from time import time
+
+from BTrees.OOBTree import OOBTree, OOSet, difference
+from BTrees.IOBTree import IOBTree
+from zLOG import LOG, DEBUG
+
+# FUTURE_TIMEOUT defines how long to keep source information regarding
+# OIDs that might be used soon.
+future_timeout = 10 * 60
+
+
class PoolScanControl:
    """Scanning for a pool of connections.

    A ScanControl instance is an attribute of an ApeDB instance.  The
    actual scanning is delegated to a Scanner instance attached to an
    ApeStorage.  The delegation theoretically permits scanning to
    occur on a ZEO server while the ScanControl instances run on
    separate ZEO clients.

    Assigns scanner-specific identities to database connections for
    the purpose of tracking which OIDs are still in use.
    """

    def __init__(self, storage, db=None, scan_interval=10):
        # storage: the ApeStorage whose scanner is told which OIDs to watch.
        # db: the database to send invalidation messages to (may be None).
        # scan_interval: minimum number of seconds between scans.
        self.storage = storage
        self.db = db
        self.next_conn_id = 1
        self.conn_oids = IOBTree()  # IOBTree({ conn_id: OOSet([oid]) })
        self.oids = OOSet()  # union of the OID sets of all connections
        self.lock = allocate_lock()  # guards the mutable state above
        self.scan_interval = scan_interval
        self.next_scan = time() + scan_interval

    def new_connection(self):
        """Returns a ConnectionScanControl to attach to a new connection.
        """
        self.lock.acquire()
        try:
            # Allocate connection ids under the lock so they are unique.
            conn_id = self.next_conn_id
            self.next_conn_id = conn_id + 1
            return ConnectionScanControl(self, conn_id)
        finally:
            self.lock.release()

    def set_connection_oids(self, conn_id, oids):
        """Records the OIDs a connection is using and periodically scans.
        """
        changed = 0
        new_oids = OOSet()
        self.lock.acquire()
        try:
            if oids:
                self.conn_oids[conn_id] = OOSet(oids)
            else:
                # An empty OID list drops the connection's entry entirely.
                if self.conn_oids.has_key(conn_id):
                    del self.conn_oids[conn_id]
            # Recompute the union of OIDs over all connections.
            for set in self.conn_oids.values():
                new_oids.update(set)
            if self.oids != new_oids:
                self.oids = new_oids
                changed = 1
        finally:
            self.lock.release()
        if changed:
            # Notify the scanner only after releasing the lock.
            self.storage.scanner.set_oids(new_oids)

    def elapsed(self):
        """Returns true if the scan interval has elapsed.
        """
        now = time()
        if now >= self.next_scan:
            self.next_scan = now + self.scan_interval
            return 1
        return 0

    def scan(self):
        """Runs a scan and sends invalidation messages to the database.
        """
        LOG('Ape', DEBUG, 'Scanning %d objects.' % len(self.oids))
        scanner = self.storage.scanner
        inv = scanner.scan()
        scanner.prune_future()
        LOG('Ape', DEBUG,
            'Finished scanning. %d objects changed.' % len(inv))
        if inv:
            # Some objects changed and the caches need to be invalidated.
            d = {}
            for oid in inv:
                d[oid] = 1
            if self.db is not None:
                self.db.invalidate(d)
            else:
                LOG('Ape', DEBUG, "No database set, so can't invalidate!")
+
+
class ConnectionScanControl:
    """Scanning for a database connection (an ApeConnection.)

    Delegates to a ScanControl, which in turn delegates to a Scanner.
    """

    def __init__(self, pool_ctl, conn_id):
        self.pool_ctl = pool_ctl    # the owning PoolScanControl
        self.conn_id = conn_id      # identity assigned by the pool
        self.next_update = 0        # earliest time set_oids() should run again

    def elapsed(self):
        """Returns true if the connection-specific scan interval has elapsed.

        The interval prevents connections from calling set_oids() with
        excessive frequency.
        """
        now = time()
        if now < self.next_update:
            return 0
        self.next_update = now + self.pool_ctl.scan_interval
        return 1

    def set_oids(self, oids):
        """Records the OIDs this connection is using.
        """
        self.pool_ctl.set_connection_oids(self.conn_id, oids)
+
+
class Scanner:
    """Scanning for an ApeStorage.

    Uses gateways to scan for changes.
    """

    def __init__(self):
        self.current = OOBTree()  # OOBTree({ oid -> {source->state} })
        self.future = {}  # { oid -> ({source->state}, atime) }
        self.lock = allocate_lock()  # guards self.current and self.future
        self.storage = None  # assigned externally after construction

    def set_oids(self, oids):
        """Sets the list of OIDs to scan.

        Gathers source information about new OIDs and discards
        source information for OIDs no longer in use.
        """
        new_sources = {}  # { oid -> sourcedict }
        self.lock.acquire()
        try:
            # Drop OIDs that are no longer in use.
            removed = difference(self.current, oids)
            for oid in removed.keys():
                del self.current[oid]
            # Adopt source info for newly used OIDs, preferring info
            # cached by after_load() in self.future.
            added = difference(oids, self.current)
            for oid in added.keys():
                if self.future.has_key(oid):
                    # Source info for this OID was provided earlier.
                    sources, atime = self.future[oid]
                    del self.future[oid]
                    self.current[oid] = sources
                else:
                    new_sources[oid] = None
        finally:
            self.lock.release()
        if new_sources:
            # Load source info the slow way.
            if self.storage is not None:
                LOG('Ape', DEBUG, 'Getting sources for %d oids.'
                    % len(new_sources))
                new_sources = self.storage.get_all_sources(new_sources.keys())
            else:
                LOG('Ape', DEBUG, "Can't get sources for %d oids. "
                    "Assuming no sources!" % len(new_sources))
                # This will cause the scanner to miss changes, but
                # since no storage is known, there is little we can
                # do.
                for oid in new_sources.keys():
                    new_sources[oid] = {}
            self.lock.acquire()
            try:
                for oid, sources in new_sources.items():
                    if not self.current.has_key(oid):
                        self.current[oid] = sources
                    # else something else added the source info
                    # while self.lock was released.
            finally:
                self.lock.release()

    def after_load(self, oid, sources):
        """Called by the storage after an object is loaded.
        """
        if sources is None:
            sources = {}
        self.lock.acquire()
        try:
            if not self.current.has_key(oid):
                # This object is being loaded for the first time.
                # Make a record of its current state immediately
                # so that the next scan can pick up changes.
                self.future[oid] = (sources, time())
            # else we already have info about this object, and now
            # isn't a good time to update self.current since that
            # would prevent changes from being detected at a time when
            # it's possible to send invalidation messages.
        finally:
            self.lock.release()

    def scan(self):
        """Scan sources, returning the OIDs of changed objects.
        """
        to_scan = {}  # { repo -> { source -> state } }
        to_invalidate = {}  # { oid -> 1 }
        self.lock.acquire()  # lock because oid_states might be self.current.
        try:
            # Regroup the known sources by repository so that each
            # repository is polled only once.
            for oid, sources in self.current.items():
                for source, state in sources.items():
                    repo, location = source
                    to_scan.setdefault(repo, {})[source] = state
        finally:
            self.lock.release()
        changes = {}
        # Poll outside the lock; polling may be slow.
        for repo, d in to_scan.items():
            c = repo.poll(d)
            if c:
                changes.update(c)
        if changes:
            # Something changed. Map the changes back to oids and
            # update self.current.
            self.lock.acquire()
            try:
                for oid, sources in self.current.items():
                    for source, state in sources.items():
                        if changes.has_key(source):
                            to_invalidate[oid] = 1
                            sources[source] = changes[source]
            finally:
                self.lock.release()
        return to_invalidate.keys()

    def prune_future(self):
        """Prunes the cache of future source information.
        """
        if self.future:
            self.lock.acquire()
            try:
                # OIDs older than some timeout will probably never be loaded.
                cutoff = time() - future_timeout
                for oid, (sources, atime) in self.future.items():
                    if atime < cutoff:
                        del self.future[oid]
            finally:
                self.lock.release()
            LOG('Ape', DEBUG,
                'Future sources cache size: %d objects.' % len(self.future))

    def changed_sources(self, oid, sources):
        """Records changes to sources made by ZODB.
        """
        # NOTE(review): mutates self.current and self.future without
        # acquiring self.lock -- presumably the caller already
        # serializes access; verify.
        self.current[oid] = sources
        if self.future.has_key(oid):
            del self.future[oid]
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/scanner.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/serializers.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/serializers.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/serializers.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,362 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Serializers specific to ZODB3.
+
+$Id$
+"""
+
+import os
+from cStringIO import StringIO
+from cPickle import Pickler, Unpickler, UnpickleableError, loads, dumps
+import time
+import base64
+from types import DictType
+
+from Persistence import Persistent, PersistentMapping
+from ZODB.TimeStamp import TimeStamp
+
+from apelib.core.interfaces \
+ import ISerializer, IFullSerializationEvent, IFullDeserializationEvent
+from apelib.core.events import SerializationEvent, DeserializationEvent
+from apelib.core.interfaces import SerializationError
+from apelib.core.schemas import RowSequenceSchema, ColumnSchema
+
+
def is_persistent(obj):
    """Returns true if obj is an instance of Persistent."""
    try:
        result = isinstance(obj, Persistent)
    except TypeError:
        # XXX Python 2.1 thinks Persistent is not a class
        result = 0
    return result
+
def encode_to_text(s, keys, unmanaged_count=0):
    """Encodes a binary pickle using base 64.

    Note that Python's text pickle format encodes unicode using full
    8-bit bytes (Python versions 2.1 through 2.3 all do this), meaning
    that so-called text pickles are not 7-bit clean. On the other
    hand, the text pickle format is fairly easy to read, making
    debugging easier. This encoding is a compromise that generates
    pure 7-bit text but also provides an overview of what's in the
    pickle.

    s is the binary pickle string.  keys is a sequence of attribute
    names stored in the pickle; they are listed in the comment header.
    unmanaged_count, if nonzero, is noted in the header as well.
    """
    comments = ['# pickle-base-64']
    if keys:
        # Sort a copy so the caller's sequence is not mutated.
        keys = list(keys)
        keys.sort()
        comments[0] = comments[0] + ' contents:'
        for key in keys:
            # Keep each key's repr on a single comment line.
            r = repr(key).replace('\n', ' ')
            comments.append('# %s' % r)
    if unmanaged_count:
        comments.append('# unmanaged persistent objects: %d' % unmanaged_count)
    text = base64.encodestring(s)
    return '%s\n%s' % ('\n'.join(comments), text)
+
def decode_from_text(s):
    """Decodes using base 64, ignoring leading comments.

    Locates the last '#' (base-64 output never contains one) and
    strips everything through the end of that comment line before
    decoding.
    """
    i = s.rfind('#')
    if i >= 0:
        j = s.find('\n', i)
        if j >= 0:
            # Remove the comments.
            s = s[j + 1:].strip()
    return base64.decodestring(s)
+
+
+
class PersistentMappingSerializer:
    """(de)serializer of a persistent mapping that uses string keys.

    Serializes both references and second-class persistent objects.
    Because of this flexibility, the schema is a little complex.
    """
    __implements__ = ISerializer

    # This schema includes both a list of items that are references to
    # persistent objects and a pickle containing items that are not
    # references.
    schema1 = RowSequenceSchema()
    schema1.add('key', 'string', 1)
    schema1.add('oid', 'string')
    schema1.add('classification', 'classification')
    schema2 = ColumnSchema('data', 'string')
    schema = {'references': schema1, 'others': schema2}

    def can_serialize(self, obj):
        # Handles PersistentMapping instances only.
        return isinstance(obj, PersistentMapping)

    def serialize(self, event):
        """Splits the mapping into references and pickled plain items."""
        assert self.can_serialize(event.obj)
        refs = []
        others = {}
        for key, value in event.obj.items():
            if is_persistent(value):
                # First-class persistent value: record a reference,
                # assigning a new OID if the value has none yet.
                oid = event.obj_db.identify(value)
                if oid is None:
                    oid = event.obj_db.new_oid()
                event.referenced(key, value, False, oid)
                # No need to pass classification.
                refs.append((key, oid, None))
            else:
                # Non-persistent value: pickled into the 'others' blob.
                event.serialized(key, value, False)
                others[key] = value
        event.ignore(('data', '_container'))
        if others:
            # Encode as a sorted list to preserve order.
            others_list = others.items()
            others_list.sort()
            s = encode_to_text(dumps(others_list, 1), others.keys())
        else:
            s = ''
        return {'references': refs, 'others': s}

    def deserialize(self, event, state):
        """Rebuilds the mapping from the pickled items and references."""
        assert self.can_serialize(event.obj)
        data_dict = {}
        s = state['others']
        if s:
            s = decode_from_text(s)
            if s:
                data = loads(s)
                if hasattr(data, 'items'):
                    # Stored as a dictionary
                    data_list = data.items()
                    data_dict = data
                else:
                    # Stored as a sequence of tuples
                    data_list = data
                    for key, value in data:
                        data_dict[key] = value
                for key, value in data_list:
                    event.deserialized(key, value)
        for (key, oid, classification) in state['references']:
            # Resolve each reference back into a (possibly ghost) object.
            value = event.resolve(key, oid, classification)
            data_dict[key] = value
        event.obj.__init__(data_dict)
+
+
class RollCall:
    """Helps ensure all parts of an object get serialized.

    Designed for debugging purposes.  Stores nothing itself;
    serialize() only verifies that the serializers that ran before it
    collectively covered every attribute of the object.
    """
    __implements__ = ISerializer
    schema = None  # No storage

    def can_serialize(self, obj):
        # Applicable to any object.
        return 1

    def serialize(self, event):
        assert IFullSerializationEvent.isImplementedBy(event)
        # Note: "get_seralized_attributes" (sic) is the spelling used
        # by the event interface.
        seen = {}
        for attr in event.get_seralized_attributes():
            seen[attr] = 1
        missed = [repr(name) for name in event.obj.__dict__.keys()
                  if not name.startswith('_v_') and not seen.has_key(name)]
        if missed:
            raise SerializationError(
                'Attribute(s) %s of object %s, oid=%s, not serialized' %
                (', '.join(missed), repr(event.obj), repr(event.oid)))
        return None

    def deserialize(self, event, state):
        # RollCall stores nothing, so there is never state to restore.
        assert state is None
+
+
class RemainingState:
    """(De)serializes the remaining state of a Persistent object.

    Picks up every attribute in the instance __dict__ that earlier
    serializers did not claim, and stores them all in one
    text-encoded binary pickle, followed by a second pickle listing
    the unmanaged persistent objects (UPOs) found along the way.
    """

    __implements__ = ISerializer

    schema = ColumnSchema('data', 'string')

    def can_serialize(self, obj):
        return is_persistent(obj)

    def serialize(self, event):
        assert IFullSerializationEvent.isImplementedBy(event)
        assert isinstance(event.obj, Persistent)

        # Allow pickling of cyclic references to the object.
        event.serialized('self', event.obj, False)

        # Ignore previously serialized attributes
        state = event.obj.__dict__.copy()
        for key in state.keys():
            # Volatile (_v_) attributes are never stored.
            if key.startswith('_v_'):
                del state[key]
        for attrname in event.get_seralized_attributes():
            if state.has_key(attrname):
                del state[attrname]
        if not state:
            # No data needs to be stored
            return ''

        outfile = StringIO()
        p = Pickler(outfile, 1)  # Binary pickle
        unmanaged = []

        def persistent_id(ob, identify_internal=event.identify_internal,
                          unmanaged=unmanaged):
            # Returns a reference for objects identified by the event;
            # collects persistent objects that fall through as UPOs.
            ref = identify_internal(ob)
            if ref is None:
                if hasattr(ob, '_p_oid'):
                    # Persistent objects that end up in the remainder
                    # are unmanaged. Tell ZODB about them so that
                    # ZODB can deal with them specially.
                    unmanaged.append(ob)
            return ref

        # Preserve order to a reasonable extent by storing a list
        # instead of a dictionary.
        state_list = state.items()
        state_list.sort()
        p.persistent_id = persistent_id
        try:
            p.dump(state_list)
        except UnpickleableError, exc:
            # Try to reveal which attribute is unpickleable.
            attrname = None
            attrvalue = None
            for key, value in state_list:
                # Retry attributes one at a time with a fresh pickler
                # so the first failing attribute can be identified.
                del unmanaged[:]
                outfile.seek(0)
                outfile.truncate()
                p = Pickler(outfile)
                p.persistent_id = persistent_id
                try:
                    p.dump(value)
                except UnpickleableError:
                    attrname = key
                    attrvalue = value
                    break
            if attrname is not None:
                # Provide a more informative exception.
                if os.environ.get('APE_TRACE_UNPICKLEABLE'):
                    # Provide an opportunity to examine
                    # the "attrvalue" attribute.
                    import pdb
                    pdb.set_trace()
                raise RuntimeError(
                    'Unable to pickle the %s attribute, %s, '
                    'of %s at %s. %s.' % (
                    repr(attrname), repr(attrvalue), repr(event.obj),
                    repr(event.oid), str(exc)))
            else:
                # Couldn't help.
                raise

        p.persistent_id = lambda ob: None  # Stop recording references
        p.dump(unmanaged)
        event.upos.extend(unmanaged)

        s = outfile.getvalue()
        return encode_to_text(s, state.keys(), len(unmanaged))

    def deserialize(self, event, state):
        assert IFullDeserializationEvent.isImplementedBy(event)
        assert isinstance(event.obj, Persistent)

        # Set up to resolve cyclic references to the object.
        event.deserialized('self', event.obj)

        state = state.strip()
        if state:
            if state.startswith('#'):
                # Text-encoded pickles start with a pound sign.
                # (A pound sign is not a valid pickle opcode.)
                data = decode_from_text(state)
            else:
                data = state
            infile = StringIO(data)
            u = Unpickler(infile)
            u.persistent_load = event.resolve_internal
            s = u.load()
            if not hasattr(s, 'items'):
                # Turn the list back into a dictionary
                s_list = s
                s = {}
                for key, value in s_list:
                    s[key] = value
            event.obj.__dict__.update(s)
            try:
                # The second pickle lists the unmanaged objects.
                unmanaged = u.load()
            except EOFError:
                # old pickle with no list of unmanaged objects
                pass
            else:
                event.upos.extend(unmanaged)
+
+
class ModTimeAttribute:
    """Sets the _p_mtime attribute.

    XXX Due to a ZODB limitation, this class has to set the _p_mtime
    by setting _p_serial.
    """

    __implements__ = ISerializer

    schema = ColumnSchema('mtime', 'int')

    def can_serialize(self, obj):
        return is_persistent(obj)

    def _set_time(self, obj, t):
        """Sets the last modification time of a Persistent obj to float t.
        """
        # TimeStamp takes (year, month, day, hour, minute, second),
        # with the seconds given as a float.
        y, mo, d, h, mi = time.gmtime(t)[:5]
        obj._p_serial = repr(TimeStamp(y, mo, d, h, mi, t % 60))

    def serialize(self, event):
        now = long(time.time())
        if event.obj._p_changed:
            # Indicate that this object just changed.  Note that the
            # time is a guess.
            self._set_time(event.obj, now)
        return now

    def deserialize(self, event, state):
        self._set_time(event.obj, state)
+
+
def find_unmanaged(obj, managed):
    """Gathers the list of unmanaged subobjects from an object.

    'managed' is a list of subobjects known to be managed.  Every
    object reachable from obj that has a _p_oid attribute but is not
    in 'managed' is collected and returned.
    """
    known = {}
    for m in managed:
        known[id(m)] = m
    unmanaged = []

    def persistent_id(ob, known_get=known.get, unmanaged=unmanaged):
        if known_get(id(ob)) is not None:
            # Don't search inside managed subobjects.
            return 'managed'
        if hasattr(ob, '_p_oid'):
            unmanaged.append(ob)
        return None

    outfile = StringIO()
    p = Pickler(outfile, 1)  # Binary pickle
    p.persistent_id = persistent_id
    p.dump(obj)
    return unmanaged
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/serializers.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/storage.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/storage.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/storage.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,254 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Storage implementation that loads/stores using Ape mappers.
+
+$Id$
+"""
+
+import md5
+from cPickle import Pickler, Unpickler
+from cStringIO import StringIO
+
+from ZODB import POSException, BaseStorage
+
+from apelib.core.io import GatewayIO
+from consts import HASH0, HASH1
+from interfaces import IResourceAccess
+
+
+class ApeStorage(BaseStorage.BaseStorage):
+
+ def __init__(self, conf_resource, connections, name='', clear_all=0,
+ debug_conflicts=0):
+ """Initializes an ApeStorage.
+
+ conf_resource is a resource for loading the IMapperConfiguration.
+ connections is a mapping that maps names to ITPCConnections.
+ """
+ assert IResourceAccess.isImplementedBy(conf_resource)
+ self.conf_resource = conf_resource
+ gwio = GatewayIO(conf_resource.access(self), connections)
+ self._gwio = gwio
+ self._conn_list = gwio.get_connection_list()
+ gwio.open_connections()
+ self.init_databases(clear_all)
+ names = []
+ sort_keys = []
+ for c in gwio.get_connection_list():
+ names.append(c.getName())
+ sort_keys.append(c.sortKey())
+ self._sort_key = tuple(sort_keys)
+ if not name:
+ name = 'ApeStorage: ' + ', '.join(names)
+ self._ltid = None
+ self.scanner = None
+ self.changed = {} # {tid: {oid: 1}}
+ self.set_debug_conflicts(debug_conflicts)
+ BaseStorage.BaseStorage.__init__(self, name)
+
+ def __len__(self):
+ return 1
+
+ def getSize(self):
+ # Stub
+ return 1
+
+ def sortKey(self):
+ return self._sort_key
+
+ def getTransactionId(self):
+ if hasattr(self, '_tid'):
+ return self._tid
+ return self._serial
+
+ def init_databases(self, clear_all=0):
+ self._gwio.init_databases(clear_all=clear_all)
+ for c in self._conn_list:
+ c.vote()
+ c.finishWrite()
+ c.finishCommit()
+
+ def set_debug_conflicts(self, debug_conflicts):
+ self.debug_conflicts = debug_conflicts
+ if debug_conflicts:
+ self._loaded_hashes = {} # {oid: hash}
+
+ def hash64(self, value):
+ """Returns an 8-byte hash value.
+ """
+ if v < -2L ** 32:
+ # This is a hash on a 64-bit machine. Treat as unsigned
+ # 64-bit integer
+ v += 2L ** 64
+ elif v < 0:
+ # This is a hash on a 32-bit machine. Treat as an unsigned
+ # 32-bit integer
+ v += 2L ** 32
+ if v > 2L ** 32:
+ # This still is a hash on 64-bit machine. Compress to 32-bit.
+ v = v / 2 ** 32
+
+ assert v >= 0 and v <= 2L ** 32
+
+ h = '%08x' % v
+ if h == HASH0:
+ # Avoid the special zero hash.
+ h = HASH1
+ return h
+
+ def load(self, oid, version):
+ if version:
+ raise POSException.Unsupported, "Versions aren't supported"
+ self._lock_acquire()
+ try:
+ self.conf_resource.access(self) # Update configuration
+ event, classification, state, hash_value = self._gwio.load(oid)
+ file = StringIO()
+ p = Pickler(file)
+ p.dump(classification)
+ p.dump(state)
+ data = file.getvalue()
+ h = self.hash64(hash_value)
+ if self.debug_conflicts:
+ self._loaded_hashes[oid] = hash_value
+ if self.scanner is not None:
+ sources = event.mapper.gateway.get_sources(event)
+ self.scanner.after_load(oid, sources)
+ return data, h
+ finally:
+ self._lock_release()
+
+ def store(self, oid, h64, data, version, transaction):
+ if transaction is not self._transaction:
+ raise POSException.StorageTransactionError(self, transaction)
+
+ if version:
+ raise POSException.Unsupported, "Versions aren't supported"
+
+ self._lock_acquire()
+ try:
+ self.conf_resource.access(self) # Update configuration
+
+ # First detect conflicts.
+ # The "h64" argument, if its value is not 0,
+ # was previously generated by hash64().
+ if h64 == HASH0:
+ # Writing a new object.
+ is_new = True
+ else:
+ # Overwriting an old object. Use the hash to verify
+ # that the new data was derived from the old data.
+ is_new = False
+ event, old_c, old_state, old_hash = self._gwio.load(oid)
+ old_h64 = self.hash64(old_hash)
+ if h64 != old_h64:
+ h = None
+ if self.debug_conflicts:
+ h = self._loaded_hashes.get(oid)
+ if h is None:
+ h = h64
+ old_hash = old_h64
+ error = ("Storing %s based on old data. %s != %s." % (
+ repr(oid), repr(h), repr(old_hash)))
+ if self.debug_conflicts:
+ # Expose the error for debugging..
+ raise RuntimeError(error)
+ else:
+ # Use normal ZODB conflict errors.
+ raise POSException.ConflictError(error)
+
+ # Now unpickle and store the data.
+ file = StringIO(data)
+ u = Unpickler(file)
+ classification = u.load()
+ state = u.load()
+ event, new_hash = self._gwio.store(
+ oid, classification, state, is_new)
+ new_h64 = self.hash64(new_hash)
+ if self.debug_conflicts:
+ self._loaded_hashes[oid] = new_hash
+
+ # Remember that this OID changed (for scanning)
+ tid = self.getTransactionId()
+ t = self.changed.get(tid)
+ if t is None:
+ t = {}
+ self.changed[tid] = t
+ t[oid] = 1
+ finally:
+ self._lock_release()
+
+ return new_h64
+
+ def get_all_sources(self, oids):
+ self._lock_acquire()
+ try:
+ res = {}
+ for oid in oids:
+ res[oid] = self._gwio.get_sources(oid)
+ return res
+ finally:
+ self._lock_release()
+
+ def new_oid(self):
+ return self._gwio.new_oid()
+
+ def lastTransaction(self):
+ return self._ltid
+
+ def _clear_temp(self):
+ pass
+
+ def _abort(self):
+ for c in self._conn_list:
+ c.abort()
+ tid = self.getTransactionId()
+ if self.changed.has_key(tid):
+ del self.changed[tid]
+
+ def _begin(self, tid, u, d, e):
+ for c in self._conn_list:
+ c.begin()
+
+ def _finish(self, tid, user, desc, ext):
+ for c in self._conn_list:
+ c.finishWrite()
+ tid = self.getTransactionId()
+ self._ltid = tid
+ if self.changed.has_key(tid):
+ oids = self.changed[tid]
+ del self.changed[tid]
+ if self.scanner:
+ for oid in oids.keys():
+ sources = self._gwio.get_sources(oid)
+ self.scanner.changed_sources(oid, sources)
+ for c in self._conn_list:
+ c.finishCommit()
+
+ def _vote(self):
+ for c in self._conn_list:
+ c.vote()
+
+ def pack(self, t, referencesf):
+ pass
+
+ def _splat(self):
+ """Spit out a string showing state.
+ """
+ return ''
+
+ def close(self):
+ for c in self._conn_list:
+ c.close()
+ self.conf_resource.release(self)
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/storage.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/utils.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/utils.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/utils.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,69 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Utilities for handling ZODB objects.
+
+$Id$
+"""
+
+from cStringIO import StringIO
+from cPickle import Pickler, Unpickler
+from types import StringType
+
+
+def zodb_copy(source):
+    """Copies a ZODB object, loading subobjects as needed.
+
+    Re-ghostifies objects along the way to save memory.
+    """
+    former_ghosts = []
+    zclass_refs = {}
+
+    def persistent_id(ob, former_ghosts=former_ghosts,
+                      zclass_refs=zclass_refs):
+        # _p_changed is None for ghosts (unloaded persistent objects).
+        if getattr(ob, '_p_changed', 0) is None:
+            # Load temporarily.
+            former_ghosts.append(ob)
+            ob._p_changed = 0
+        if hasattr(ob, '__bases__'):
+            # Looks like a class.  A __module__ starting with '*'
+            # marks a ZClass, which lives in the ZODB, not on disk.
+            m = getattr(ob, '__module__', None)
+            if (m is not None
+                and isinstance(m, StringType)
+                and m.startswith('*')):
+                n = getattr(ob, '__name__', None)
+                if n is not None:
+                    # Pickling a ZClass instance.  Store the reference to
+                    # the ZClass class separately, so that the pickler
+                    # and unpickler don't trip over the apparently
+                    # missing module.
+                    ref = (m, n)
+                    zclass_refs[ref] = ob
+                    return ref
+        return None
+
+    def persistent_load(ref, zclass_refs=zclass_refs):
+        # Resolve the ZClass references recorded by persistent_id().
+        return zclass_refs[ref]
+
+    stream = StringIO()
+    p = Pickler(stream, 1)
+    p.persistent_id = persistent_id
+    p.dump(source)
+    if former_ghosts:
+        # Return the temporarily loaded objects to their ghost state.
+        for g in former_ghosts:
+            del g._p_changed
+        del former_ghosts[:]
+    stream.seek(0)
+    u = Unpickler(stream)
+    u.persistent_load = persistent_load
+    return u.load()
+
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/utils.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zodb3/zodbtables.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zodb3/zodbtables.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zodb3/zodbtables.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,419 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""ZODB tables with support for basic relational operations.
+
+$Id$
+"""
+
+import ZODB
+from Persistence import Persistent
+from BTrees.IIBTree import IITreeSet, intersection
+from BTrees.IOBTree import IOBTree
+from BTrees.OIBTree import OIBTree
+from BTrees.OOBTree import OOBTree
+from Record import Record
+
+
+class DuplicateError(Exception):
+    """Duplicated data record
+
+    Raised when an insert or update would create two records with the
+    same primary key.
+    """
+
+
+class Column:
+
+ def __init__(self, name, primary, indexed):
+ self.name = name # string
+ self.primary = primary # boolean
+ self.indexed = indexed # boolean
+
+ def __repr__(self):
+ return "<%s(%s)>" % (self.__class__.__name__, self.name)
+
+
+class TableSchema:
+
+ reserved_names = ('rid',)
+
+ def __init__(self):
+ self.columns = []
+ self.column_names = {}
+
+ def add(self, name, primary=0, indexed=0):
+ if name in self.reserved_names:
+ raise ValueError, "Column name %s is reserved" % repr(name)
+ if self.column_names.has_key(name):
+ raise ValueError, "Column %s already exists" % repr(name)
+ self.column_names[name] = 1
+ self.columns.append(Column(name, primary, indexed))
+
+ def get_columns(self):
+ return tuple(self.columns)
+
+ def __repr__(self):
+ names = []
+ for c in self.columns:
+ names.append(c.name)
+ return "<%s(%s)>" % (self.__class__.__name__, ', '.join(names))
+
+
+class TableRecordMixin:
+
+ def __repr__(self):
+ items = []
+ for name, position in self.__record_schema__.items():
+ value = repr(getattr(self, name))
+ items.append((position, "%s=%s" % (name, value)))
+ items.sort()
+ params = []
+ for position, p in items:
+ params.append(p)
+ return "<%s(%s)>" % (self.__class__.__name__, ', '.join(params))
+
+
+class Table(Persistent):
+ """Simple, generic relational table.
+ """
+ schema = None
+ _v_record_class = None
+
+ def __init__(self, schema=None):
+ if schema is not None:
+ self.schema = schema
+ columns = schema.get_columns()
+ self.col_info = [] # [(tuple position, column),]
+ self.positions = {}
+ for i in range(len(columns)):
+ # Leave space for the record ID at position 0.
+ position = i + 1
+ self.col_info.append((position, columns[i]))
+ self.positions[columns[i].name] = position
+ self.proto_record = [None] * (len(columns) + 1)
+ self.next_rid = 1
+ self.clear()
+
+
+ def clear(self):
+ self.data = IOBTree() # {rid -> record as tuple}
+ self.indexes = {} # {index_name -> OOBTree({value -> IITreeSet})}
+ self.primary_index = OIBTree() # {primary key -> rid}
+ for position, column in self.col_info:
+ if column.indexed:
+ self.indexes[column.name] = OOBTree()
+
+
+ def tuplify(self, params):
+ """Accepts a mapping-like object and returns a tuple.
+ """
+ record = self.proto_record[:]
+ positions = self.positions
+ if hasattr(params, '__record_schema__'):
+ for name in params.__record_schema__.keys():
+ position = positions[name]
+ record[position] = params[name]
+ else:
+ for name, value in params.items():
+ position = positions[name]
+ record[position] = value
+ return tuple(record)
+
+
+ def insert(self, params):
+ record = self.tuplify(params)
+
+ # Determine the primary key.
+ primary_key = []
+ for position, column in self.col_info:
+ if column.primary:
+ if record[position] is None:
+ raise ValueError, (
+ "No value provided for primary key column %s"
+ % repr(column.name))
+ primary_key.append(record[position])
+ if primary_key:
+ primary_key = tuple(primary_key)
+ if self.primary_index.has_key(primary_key):
+ raise DuplicateError(
+ "Primary key %s in use" % repr(primary_key))
+
+ # Add a record.
+ rid = self.next_rid
+ self.next_rid += 1 # XXX Hotspot!
+ record = (rid,) + record[1:]
+ self.data[rid] = record
+ if primary_key:
+ self.primary_index[primary_key] = rid
+
+ # Add to indexes.
+ for position, column in self.col_info:
+ name = column.name
+ value = record[position]
+ if value is not None:
+ if self.indexes.has_key(name):
+ set = self.indexes[name].get(value)
+ if set is None:
+ set = IITreeSet()
+ self.indexes[name][value] = set
+ set.insert(rid)
+
+ # Return the number of rows inserted.
+ return 1
+
+
+ def delete(self, filter):
+ rids = self._select_rids(self.tuplify(filter))
+ if rids is None:
+ # Zap everything
+ count = len(self.data)
+ self.clear()
+ return count
+ elif not rids:
+ # No rows selected
+ return 0
+
+ rids = tuple(rids) # Make sure rids is a static sequence
+ for rid in rids:
+ old_r = self.data[rid]
+ assert old_r[0] == rid
+ primary_key = []
+ for position, column in self.col_info:
+ old_value = old_r[position]
+ if old_value is not None:
+ if column.primary:
+ primary_key.append(old_value)
+ # Remove from indexes.
+ index = self.indexes.get(column.name)
+ if index is not None:
+ if index.has_key(old_value):
+ # Remove an index entry.
+ set = index[old_value]
+ set.remove(rid)
+ if not set:
+ del index[old_value]
+
+ if primary_key:
+ # Remove a primary key.
+ primary_key = tuple(primary_key)
+ assert self.primary_index[primary_key] == rid
+ del self.primary_index[primary_key]
+
+ # Remove the data.
+ del self.data[rid]
+
+ return len(rids)
+
+
+ def update(self, filter, changes):
+ rids = self._select_rids(self.tuplify(filter))
+ if rids is None:
+ rids = self.data.keys()
+ elif not rids:
+ # Nothing needs to be updated.
+ return 0
+ count = len(rids)
+
+ # Identify changes.
+ old_data = {} # rid -> old tuple
+ new_data = {} # rid -> new tuple
+ old_to_new = {} # old primary key -> new primary key
+ new_to_rid = {} # new primary key -> rid
+
+ record = self.tuplify(changes)
+ for rid in rids:
+ old_r = self.data[rid]
+ old_data[rid] = old_r
+ new_r = list(old_r)
+ # new_r and old_r contain record tuples.
+ for position, column in self.col_info:
+ if record[position] is not None:
+ new_r[position] = record[position]
+ new_data[rid] = tuple(new_r)
+ # Hmm. The code below allows an update to change the primary
+ # key. It might be better to prevent primary key columns from
+ # being changed by an update() call.
+ opk = []
+ npk = []
+ for position, column in self.col_info:
+ if column.primary:
+ opk.append(old_r[position])
+ npk.append(new_r[position])
+ if opk != npk:
+ opk = tuple(opk)
+ npk = tuple(npk)
+ old_to_new[opk] = npk
+ new_to_rid[npk] = rid
+
+ # Look for primary key conflicts. A primary key conflict can
+ # occur when changing a record to a different primary key and
+ # the new primary key is already in use.
+ for pk in old_to_new.values():
+ if (self.primary_index.has_key(pk)
+ and not old_to_new.has_key(pk)):
+ raise DuplicateError("Primary key %s in use" % repr(pk))
+
+ # Update the data.
+ self.data.update(new_data)
+
+ # Remove old primary key indexes and insert new primary key indexes.
+ for pk in old_to_new.keys():
+ del self.primary_index[pk]
+ self.primary_index.update(new_to_rid)
+
+ # Update indexes.
+ for rid, old_r in old_data.items():
+ for position, column in self.col_info:
+ index = self.indexes.get(column.name)
+ if index is not None:
+ new_value = record[position]
+ old_value = old_r[position]
+ if new_value != old_value:
+ if old_value is not None and index.has_key(old_value):
+ # Remove an index entry.
+ set = index[old_value]
+ set.remove(rid)
+ if not set:
+ del index[old_value]
+ if new_value is not None:
+ # Add an index entry.
+ set = index.get(new_value)
+ if set is None:
+ set = IITreeSet()
+ index[new_value] = set
+ set.insert(rid)
+
+ # Return the number of rows affected.
+ return count
+
+
+ def get_record_class(self):
+ klass = self._v_record_class
+ if klass is None:
+ schema = {'rid': 0}
+ for position, column in self.col_info:
+ schema[column.name] = position
+ class TableRecord(TableRecordMixin, Record):
+ __record_schema__ = schema
+ self._v_record_class = klass = TableRecord
+ return klass
+
+
+ def select(self, filter):
+ rids = self._select_rids(self.tuplify(filter))
+ if rids is None:
+ # All
+ klass = self.get_record_class()
+ return [klass(rec) for rec in self.data.values()]
+ elif rids:
+ # Some
+ klass = self.get_record_class()
+ data = self.data
+ return [klass(data[rid]) for rid in rids]
+ else:
+ # None
+ return []
+
+
+ def _select_rids(self, query):
+ """Searches the table for matches, returning record ids.
+
+ Returns a sequence of record ids, or None for all records.
+ """
+ primary_key = []
+ params = 0 # The number of parameters specified
+ primary_params = 0 # The number of primary params specified
+ for position, column in self.col_info:
+ value = query[position]
+ if value is not None:
+ params += 1
+ if column.primary:
+ primary_params += 1
+ if primary_key is not None:
+ primary_key.append(value)
+ elif column.primary:
+ # Didn't fully specify the primary key.
+ # Can't search by primary key.
+ primary_key = None
+
+ if not params:
+ # No query. Select all.
+ return None
+
+ # First strategy: try to satisfy the request by consulting
+ # the primary key index.
+ if primary_key:
+ # The primary key is complete. The result set will have
+ # either zero rows or one row.
+ primary_key = tuple(primary_key)
+ rid = self.primary_index.get(primary_key)
+ if rid is None:
+ return ()
+ # Possibly filter out the single item.
+ if params > primary_params:
+ cand = self.data[rid]
+ for position, column in self.col_info:
+ if query[position] is not None:
+ if cand[position] != query[position]:
+ # Not a match.
+ return ()
+ return (rid,)
+
+ # Second strategy: try to satisfy the request by intersecting
+ # indexes.
+ rids = None
+ iteration_filters = []
+ for position, column in self.col_info:
+ value = query[position]
+ if value is not None:
+ index = self.indexes.get(column.name)
+ if index is None:
+ iteration_filters.append((position, value))
+ else:
+ set = index.get(value)
+ if set is None:
+ # No rows satisfy this criterion.
+ return ()
+ if rids is None:
+ rids = set
+ else:
+ rids = intersection(rids, set)
+ if not rids:
+ # No rows satisfy all criteria.
+ return ()
+ if rids is not None:
+ rids = rids.keys()
+
+ if not iteration_filters:
+ # Indexes did all the work. No need to search each record.
+ return rids
+
+ # Fallback strategy: Eliminate items one by one.
+ if rids is None:
+ # Use the whole data set.
+ candidates = self.data.values()
+ else:
+ # Use the specified records.
+ candidates = [self.data[rid] for rid in rids]
+
+ rids = []
+ append = rids.append
+ for cand in candidates:
+ for position, value in iteration_filters:
+ if cand[position] != value:
+ # Not a match.
+ break
+ else:
+ # A match.
+ append(cand[0])
+ return rids
+
+ def __repr__(self):
+ return "<%s(schema=%s)>" % (self.__class__.__name__, repr(self.schema))
Property changes on: Products.Ape/trunk/lib/apelib/zodb3/zodbtables.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,18 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Zope 2 support.
+
+$Id$
+"""
+
Property changes on: Products.Ape/trunk/lib/apelib/zope2/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/apeconf.xml
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/apeconf.xml (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/apeconf.xml 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,327 @@
+<?xml version="1.0"?>
+
+<!-- Basic Zope 2 configuration for Ape. See Ape/doc/apexml.txt. -->
+
+<configuration>
+
+<!-- Classifier and OID generator -->
+
+<variation name="filesystem">
+ <classifier factory="apelib.zope2.classifier.Classifier">
+ <gateway factory="apelib.fs.classification.FSClassificationAnnotation" />
+ </classifier>
+ <oid-generator factory="apelib.core.oidgen.SerialOIDGenerator" />
+ </variation>
+ <variation name="sql">
+ <classifier factory="apelib.zope2.classifier.Classifier">
+ <gateway factory="apelib.sql.classification.SQLClassification" />
+ </classifier>
+ <oid-generator factory="apelib.sql.oidgen.SQLOIDGenerator" />
+</variation>
+
+<!-- Root mapper -->
+
+<mapper name="root" class="Persistence.PersistentMapping">
+ <serializer name="items"
+ factory="apelib.zodb3.serializers.PersistentMappingSerializer" />
+ <serializer name="roll_call" factory="apelib.zodb3.serializers.RollCall" />
+ <variation name="filesystem">
+ <gateway name="items" factory="apelib.fs.structure.root_mapping" />
+ </variation>
+ <variation name="sql">
+ <gateway name="items" factory="apelib.sql.structure.root_mapping" />
+ </variation>
+</mapper>
+<load generic="root" using="root" />
+
+<!-- 'common' is an abstract mapper that provides commonly used
+serializers and gateways. -->
+
+<mapper name="common">
+
+ <serializer name="id" order="a"
+ factory="apelib.zope2.ofsserial.IdAttribute" />
+ <variation name="filesystem">
+ <gateway name="id" factory="apelib.fs.structure.FSAutoId" />
+ </variation>
+ <variation name="sql">
+ <gateway name="id" factory="apelib.sql.structure.SQLItemId" />
+ </variation>
+
+ <serializer name="modtime"
+ factory="apelib.zodb3.serializers.ModTimeAttribute" />
+ <variation name="filesystem">
+ <gateway name="modtime"
+ factory="apelib.fs.structure.FSModTime" />
+ </variation>
+ <variation name="sql">
+ <gateway name="modtime"
+ factory="apelib.sql.structure.SQLModTime" />
+ </variation>
+
+ <serializer name="security"
+ factory="apelib.zope2.security.SecurityAttributes" />
+ <variation name="filesystem">
+ <gateway name="security"
+ factory="apelib.fs.security.FSSecurityAttributes" />
+ </variation>
+ <variation name="sql">
+ <gateway name="security"
+ factory="apelib.sql.security.SQLSecurityAttributes" />
+ </variation>
+
+ <serializer name="remainder" order="z"
+ factory="apelib.zodb3.serializers.RemainingState" />
+ <variation name="filesystem">
+ <gateway name="remainder"
+ factory="apelib.fs.properties.FSAnnotationData('remainder')" />
+ </variation>
+ <variation name="sql">
+ <gateway name="remainder"
+ factory="apelib.sql.structure.SQLRemainder" />
+ </variation>
+
+</mapper>
+
+<!-- 'common_p' is an abstract mapper with properties -->
+
+<mapper name="common_p" extends="common">
+ <serializer name="properties"
+ factory="apelib.zope2.ofsserial.OFSProperties" />
+ <variation name="filesystem">
+ <gateway name="properties"
+ factory="apelib.fs.properties.FSProperties" />
+ </variation>
+ <variation name="sql">
+ <gateway name="properties"
+ factory="apelib.sql.properties.SQLMultiTableProperties" />
+ </variation>
+</mapper>
+
+<!-- 'common_text' is an abstract mapper with properties and a text body -->
+
+<mapper name="common_text" extends="common_p">
+ <serializer name="text"
+ factory="apelib.core.serializers.StringDataAttribute('raw')" />
+ <variation name="filesystem">
+ <gateway name="text"
+ factory="apelib.fs.structure.FSFileData('text')" />
+ </variation>
+ <variation name="sql">
+ <gateway name="text"
+ factory="apelib.sql.structure.SQLObjectData" />
+ </variation>
+</mapper>
+
+<!-- Folder -->
+
+<mapper name="folder" extends="common_p"
+ class="OFS.Folder.Folder">
+ <serializer name="items"
+ factory="apelib.zope2.ofsserial.FolderItems" />
+ <variation name="filesystem">
+ <gateway name="items"
+ factory="apelib.fs.structure.FSDirectoryItems" />
+ </variation>
+ <variation name="sql">
+ <gateway name="items"
+ factory="apelib.sql.structure.SQLFolderItems" />
+ </variation>
+</mapper>
+<store exact-class="OFS.Folder.Folder" using="folder" />
+<load generic="directory" using="folder" />
+
+<!-- File -->
+
+<mapper name="file" extends="common_p"
+ class="OFS.Image.File">
+ <serializer name="data"
+ factory="apelib.zope2.ofsserial.FilePData" />
+ <variation name="filesystem">
+ <gateway name="data"
+ factory="apelib.fs.structure.FSFileData('binary')" />
+ </variation>
+ <variation name="sql">
+ <gateway name="data"
+ factory="apelib.sql.structure.SQLObjectData" />
+ </variation>
+</mapper>
+<store class="OFS.Image.File" using="file"
+ default-extension-source="content_type" />
+<load generic="file" using="file" />
+
+<!-- Image -->
+
+<mapper name="image" extends="file"
+ class="OFS.Image.Image">
+</mapper>
+<store class="OFS.Image.Image" using="image"
+ default-extension-source="content_type" />
+<load extensions=".gif .jpg .jpeg .png" using="image" />
+
+<!-- Page template -->
+
+<mapper name="page_template" extends="common_text"
+ class="Products.PageTemplates.ZopePageTemplate.ZopePageTemplate">
+ <serializer name="text"
+ factory="apelib.core.serializers.StringDataAttribute('_text')" />
+ <serializer name="bindings"
+ factory="apelib.zope2.ofsserial.AutoBindings" />
+</mapper>
+<store class="Products.PageTemplates.ZopePageTemplate.ZopePageTemplate"
+ using="page_template" default-extension=".html" />
+<load extensions=".html .htm .zpt .pt" using="page_template" />
+
+<!-- DTML Document -->
+
+<mapper name="dtml_document" extends="common_text"
+ class="OFS.DTMLDocument.DTMLDocument">
+ <serializer name="text"
+ factory="apelib.core.serializers.StringDataAttribute('raw')" />
+</mapper>
+<store class="OFS.DTMLDocument.DTMLDocument" using="dtml_document"
+ default-extension=".dtml" />
+
+<!-- DTML Method -->
+
+<mapper name="dtml_method" extends="dtml_document"
+ class="OFS.DTMLMethod.DTMLMethod">
+ <serializer name="properties" enabled="false" />
+ <gateway name="properties" enabled="false" />
+</mapper>
+<store class="OFS.DTMLMethod.DTMLMethod" using="dtml_method"
+ default-extension=".dtml" />
+<load extensions=".dtml" using="dtml_method" />
+
+<!-- ZSQL Method -->
+
+<mapper name="zsql" extends="common_text"
+ class="Products.ZSQLMethods.SQL.SQL">
+ <serializer name="properties"
+ factory="apelib.zope2.scripts.ZSQLMethodPropertiesSerializer" />
+ <serializer name="text"
+ factory="apelib.zope2.scripts.ZSQLMethodSerializer" />
+</mapper>
+<store class="Products.ZSQLMethods.SQL.SQL" using="zsql"
+ default-extension=".sql" />
+<load extensions=".sql" using="zsql" />
+
+<!-- Python Script -->
+
+<mapper name="python_script" extends="common_text"
+ class="Products.PythonScripts.PythonScript.PythonScript">
+ <serializer name="properties" enabled="false" />
+ <gateway name="properties" enabled="false" />
+ <serializer name="text"
+ factory="apelib.zope2.scripts.PythonScriptSerializer" />
+</mapper>
+<store class="Products.PythonScripts.PythonScript.PythonScript"
+ using="python_script" default-extension=".py" />
+<load extensions=".py" using="python_script" />
+
+<!-- User Folder -->
+
+<mapper name="user_folder" extends="common"
+ class="AccessControl.User.UserFolder">
+ <serializer name="data"
+ factory="apelib.zope2.security.UserFolderSerializer" />
+ <variation name="filesystem">
+ <gateway name="data" factory="apelib.fs.security.FSUserList" />
+ </variation>
+ <variation name="sql">
+ <gateway name="data" factory="apelib.sql.security.SQLUserList" />
+ </variation>
+</mapper>
+<store exact-class="AccessControl.User.UserFolder"
+ using="user_folder" />
+
+<!-- Arbitrary ObjectManagers -->
+
+<mapper name="anyfolder" extends="folder"
+ class="OFS.ObjectManager.ObjectManager">
+ <serializer name="properties"
+ factory="apelib.zope2.ofsserial.OptionalOFSProperties" />
+</mapper>
+<store class="OFS.ObjectManager.ObjectManager"
+ using="anyfolder" />
+
+<!-- Arbitrary SimpleItems -->
+
+<mapper name="anyfile" extends="common_p"
+ class="OFS.SimpleItem.Item">
+ <serializer name="properties"
+ factory="apelib.zope2.ofsserial.OptionalOFSProperties" />
+ <variation name="filesystem">
+ <gateway name="remainder"
+ factory="apelib.fs.structure.FSFileData('binary')" />
+ </variation>
+ <variation name="sql">
+ <gateway name="remainder"
+ factory="apelib.sql.structure.SQLObjectData" />
+ </variation>
+</mapper>
+<store class="OFS.SimpleItem.Item" using="anyfile" />
+
+<!-- Application -->
+
+<mapper name="application" extends="folder"
+ class="OFS.Application.Application">
+ <serializer name="id" enabled="false" />
+ <gateway name="id" enabled="false" />
+</mapper>
+<store class="OFS.Application.Application" using="application" />
+<load generic="basepath" using="application" />
+
+<!-- BTreeFolder2 and derivatives -->
+
+<mapper name="btreefolder2" extends="folder"
+ class="Products.BTreeFolder2.BTreeFolder2.BTreeFolder2Base">
+ <serializer name="items"
+ factory="apelib.zope2.products.BTreeFolder2Items" />
+</mapper>
+<store class="Products.BTreeFolder2.BTreeFolder2.BTreeFolder2Base"
+ using="btreefolder2" />
+
+<!-- DCWorkflow.ContainerTab -->
+
+<mapper name="containertab" extends="folder"
+ class="Products.DCWorkflow.ContainerTab.ContainerTab">
+ <serializer name="items"
+ factory="apelib.zope2.products.ContainerTabItems" />
+</mapper>
+<store class="Products.DCWorkflow.ContainerTab.ContainerTab"
+ using="containertab" />
+
+<!-- Compatibility with former mapper names. -->
+
+<load mapper-name="OFS.Folder.Folder" using="folder" />
+<load mapper-name="OFS.Image.File" using="file" />
+<load mapper-name="OFS.Image.Image" using="image" />
+<load mapper-name="Products.PageTemplates.ZopePageTemplate.ZopePageTemplate"
+ using="page_template" />
+<load mapper-name="OFS.DTMLDocument.DTMLDocument" using="dtml_document" />
+<load mapper-name="OFS.DTMLMethod.DTMLMethod" using="dtml_method" />
+<load mapper-name="Products.ZSQLMethods.SQL.SQL" using="zsql" />
+<load mapper-name="Products.PythonScripts.PythonScript.PythonScript"
+ using="python_script" />
+<load mapper-name="AccessControl.User.UserFolder" using="user_folder" />
+<load mapper-name="OFS.Application.Application" using="application" />
+
+<!-- Workarounds for objects that don't work with the anyfolder
+mapper, but do fine with anyfile. -->
+
+<store class="Products.CMFCore.SkinsTool.SkinsTool" using="anyfile" />
+<store class="App.ApplicationManager.ApplicationManager" using="anyfile" />
+
+<!-- Here is a special mapper that launches the Python debugger, pdb,
+ just before serialization or deserialization. -->
+
+<!--
+<mapper name="pdbfile" extends="anyfile">
+ <serializer factory="apelib.core.serializers.PDBSerializer" />
+</mapper>
+<store class="Products.GroupUserFolder.GRUFFolder.GRUFUsers"
+ using="pdbfile" />
+-->
+
+</configuration>
Added: Products.Ape/trunk/lib/apelib/zope2/classifier.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/classifier.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/classifier.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,181 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Zope 2 object classification.
+
+$Id$
+"""
+
+from mimetypes import guess_extension
+
+from Acquisition import aq_base
+
+from apelib.core.interfaces import IConfigurableClassifier
+from apelib.core.interfaces import ClassificationError
+
+# guess_extension() is useful, but it's unoptimized and sometimes
+# chooses strange extensions.  fixed_extensions simply maps a content
+# type to a preferred filename extension, providing better defaults
+# than guess_extension() for the most common types.  Types not listed
+# here fall back to guess_extension() (see Classifier.classify_object).
+fixed_extensions = {
+ 'text/plain': '.txt',
+ 'text/html': '.html',
+ 'image/png': '.png',
+ 'image/jpg': '.jpg',
+ 'image/jpeg': '.jpg',
+ 'image/gif': '.gif',
+ 'application/octet-stream': '', # No extension--too overloaded.
+ }
+
+# NOTE(review): this tuple is not referenced anywhere in this module;
+# presumably it enumerates the valid 'generic' load-rule keywords
+# (plus 'root', which is used directly) -- confirm against the config code.
+generic_classifications = ('directory', 'file', 'basepath')
+
+
+class Classifier:
+ """A classifier with some minimal Zope 2 extensions.
+
+ Maps objects to mappers for storage (classify_object) and stored
+ classifications back to mappers for loading (classify_state).
+ """
+ __implements__ = IConfigurableClassifier
+ # Gateway used by classify_state() to load the stored classification
+ # for an oid; set in the constructor.
+ gateway = None
+
+ def __init__(self, gw=None):
+ self.gateway = gw
+ self.store_map = {} # { class_name -> { mapper name, other options } }
+ self.load_alias_map = {} # { old mapper name -> new mapper name }
+ self.load_ext_map = {} # { '.ext' -> mapper name }
+ self.load_generic_map = {} # { keyword -> mapper name }
+
+ def add_store_rule(self, class_name, mapper_name, exact=False,
+ default_extension=None, default_extension_source=None):
+ """Registers the mapper to use when storing instances of class_name.
+
+ If 'exact' is true the rule does not apply to subclasses.
+ """
+ self.store_map[class_name] = {
+ 'mapper_name': mapper_name,
+ 'exact': exact,
+ 'default_extension': default_extension,
+ 'default_extension_source': default_extension_source,
+ }
+
+ def add_load_rule(self, criterion, value, mapper_name):
+ """Registers a rule for choosing a mapper at load time.
+
+ criterion is one of 'mapper-name' (alias for a renamed mapper),
+ 'extension' (filename extension) or 'generic' (keyword such as
+ 'directory', 'file', 'basepath' or 'root').
+ """
+ value = str(value) # Avoid unicode
+ if criterion == 'mapper-name':
+ self.load_alias_map[value] = mapper_name
+ elif criterion == 'extension':
+ self.load_ext_map[value] = mapper_name
+ elif criterion == 'generic':
+ self.load_generic_map[value] = mapper_name
+ else:
+ raise ValueError('Unknown classification criterion: %s'
+ % repr(criterion))
+
+
+ def find_class_mapper(self, event, klass, is_base=False):
+ """Searches for a mapper of a given class, including base classes.
+
+ Returns a value in store_map or None.  The search is depth-first
+ over klass.__bases__; rules marked 'exact' are skipped for
+ subclasses.
+ """
+ try:
+ class_name = '%s.%s' % (klass.__module__, klass.__name__)
+ except AttributeError:
+ # Not a conventional class object (no __module__/__name__).
+ return None
+ d = self.store_map.get(class_name)
+ if d is not None:
+ if is_base and d.get('exact'):
+ # this rule doesn't want subclasses.
+ d = None
+ if d is None:
+ for base in klass.__bases__:
+ d = self.find_class_mapper(event, base, is_base=True)
+ if d is not None:
+ break
+ return d
+
+
+ def classify_object(self, event):
+ """Chooses a classification, including a mapper, for storing an object.
+
+ Raises ClassificationError when no store rule matches the
+ object's class (or any base class).
+ """
+ if event.oid == event.conf.oid_gen.root_oid:
+ # Use the root mapper if one is configured.
+ mapper_name = self.load_generic_map.get('root')
+ if mapper_name is not None:
+ return {'mapper_name': mapper_name}
+ klass = event.obj.__class__
+ class_name = '%s.%s' % (klass.__module__, klass.__name__)
+ classification = {'class_name': class_name}
+ opts = self.find_class_mapper(event, klass)
+ if opts is None:
+ raise ClassificationError(
+ 'No mapper known for class %s' % repr(class_name))
+ classification['mapper_name'] = opts['mapper_name']
+ if opts.get('default_extension_source') == 'content_type':
+ # NOTE(review): an object without a content_type attribute
+ # yields the literal string 'None' here; fixed_extensions and
+ # guess_extension then find no extension, so this is harmless
+ # but slightly odd.
+ ct = str(getattr(aq_base(event.obj), 'content_type', None))
+ ext = fixed_extensions.get(ct)
+ if ext is None:
+ ext = guess_extension(ct)
+ else:
+ ext = opts.get('default_extension')
+ if ext:
+ classification['extension'] = ext
+ return classification
+
+
+ def classify_state(self, event):
+ """Chooses a classification, including a mapper, for loading an object.
+
+ Consults the stored classification first, then falls back to
+ generic rules based on subpath, node type and extension.
+ Raises ClassificationError when no rule applies.
+ """
+ if event.oid == event.conf.oid_gen.root_oid:
+ # Use the root mapper if one is configured.
+ mapper_name = self.load_generic_map.get('root')
+ if mapper_name is not None:
+ return {'mapper_name': mapper_name}
+ # NOTE(review): the serial returned by the gateway is unused here.
+ classification, serial = self.gateway.load(event)
+ class_name = classification.get('class_name')
+ if class_name and ':' in class_name:
+ # Backward compatibility
+ class_name = class_name.replace(':', '.')
+ classification['class_name'] = class_name
+ mapper_name = classification.get('mapper_name')
+ if mapper_name is not None:
+ # Possibly update to a new mapper name
+ mapper_name = self.load_alias_map.get(
+ mapper_name, mapper_name)
+ if mapper_name is None:
+ # The choice of mapper is not stored explicitly. Choose
+ # one based on several criteria.
+ # NOTE: the following branch is deliberately disabled
+ # ('if False'); it is kept as a record of the old
+ # meta_type-based backward-compatibility heuristic.
+ if False:
+ # bw compat: look for certain meta_types.
+ mt = classification.get('meta_type')
+ if mt == '(folderish object)':
+ mapper_name = 'anyfolder'
+ elif mt == '(fileish object)':
+ mapper_name = 'anyfile'
+ if mapper_name is None:
+ subpath = classification.get('subpath')
+ if subpath is not None and not subpath:
+ # Application base (empty, non-None subpath)
+ mapper_name = self.load_generic_map.get('basepath')
+ if mapper_name is None:
+ t = classification.get('node_type')
+ if t == 'd':
+ # Directory
+ mapper_name = self.load_generic_map.get('directory')
+ elif t == 'f':
+ # File: try the extension map first, then the generic
+ # 'file' rule.
+ ext = classification.get('extension')
+ if ext:
+ if not ext.startswith('.'):
+ ext = '.' + ext
+ mapper_name = self.load_ext_map.get(ext.lower())
+ if not mapper_name:
+ mapper_name = self.load_generic_map.get('file')
+ if mapper_name is None:
+ raise ClassificationError(
+ 'No mapper known for oid %s' % repr(event.oid))
+
+ classification['mapper_name'] = mapper_name
+ return classification
Property changes on: Products.Ape/trunk/lib/apelib/zope2/classifier.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/mapper.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/mapper.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/mapper.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,74 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Zope 2 mapper creation
+
+$Id$
+"""
+
+import os
+import Products
+from apelib.config.apeconf import configure
+
+
+def load_conf(vname, search_products=0):
+ """Builds a mapper from apeconf.xml files.
+
+ vname is the configuration variation to apply (callers in this
+ module pass 'filesystem' or 'sql').  When search_products is true,
+ every Zope product directory is also scanned for an apeconf.xml,
+ letting products extend the mapper configuration.
+ """
+ here = os.path.dirname(__file__)
+ filenames = [os.path.join(here, 'apeconf.xml')]
+ if search_products:
+ # 'seen' de-duplicates product names when Products.__path__ has
+ # multiple entries; the first occurrence on the path wins.
+ seen = {} # Product name -> 1
+ for path in Products.__path__:
+ for name in os.listdir(path):
+ if not seen.has_key(name):
+ seen[name] = 1
+ fn = os.path.join(path, name, 'apeconf.xml')
+ if os.path.exists(fn):
+ filenames.append(fn)
+ return configure(filenames, vname)
+
+
+def create_fs_mapper(basepath, **kw):
+ """Filesystem mapper factory.
+
+ Returns (mapper, { name -> connection })
+
+ Extra keyword arguments are passed through to FSConnection.
+
+ Usage in database configuration file:
+ factory=apelib.zope2.mapper.create_fs_mapper
+ basepath=/var/zope/data
+ """
+ # Imported here, not at module level -- presumably so the fs backend
+ # is only required when this factory is actually used.
+ from apelib.fs.connection import FSConnection
+
+ mapper = load_conf('filesystem', search_products=1)
+ conn = FSConnection(basepath, **kw)
+ return mapper, {'fs': conn}
+
+
+def create_sql_mapper(module_name, **kw):
+ """SQL mapper factory.
+
+ Returns (mapper, { name -> connection })
+
+ module_name names a DB-API module (e.g. psycopg); extra keyword
+ arguments are passed through to DBAPIConnector.
+
+ Usage in database configuration file:
+ factory=apelib.zope2.mapper.create_sql_mapper
+ module_name=psycopg
+ params=
+ kwparams=
+ table_prefix=zodb
+ """
+ # Imported here, not at module level -- presumably so the sql backend
+ # is only required when this factory is actually used.
+ from apelib.sql.dbapi import DBAPIConnector
+
+ mapper = load_conf('sql', search_products=1)
+ conn = DBAPIConnector(module_name, **kw)
+ return mapper, {'db': conn}
+
Property changes on: Products.Ape/trunk/lib/apelib/zope2/mapper.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/ofsserial.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/ofsserial.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/ofsserial.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,286 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Serializers for OFSP (object file system product) objects
+
+$Id$
+"""
+
+from cPickle import dumps, loads
+from types import DictType
+
+from Acquisition import aq_base
+from OFS.SimpleItem import Item_w__name__
+from OFS.ObjectManager import ObjectManager
+from OFS.Image import File
+from OFS.PropertyManager import PropertyManager
+
+from apelib.core.interfaces import ISerializer, SerializationError
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+from apelib.core.serializers import OptionalSerializer
+
+
+string_repr_types = {
+ # Properties that are safe to render as strings for storage.
+ # Other kinds of properties get pickled.
+ # (Used as a set, Python 2 style: type name -> 1.)
+ 'string': 1,
+ 'float': 1,
+ 'int': 1,
+ 'long': 1,
+ 'date': 1,
+ 'date_international': 1,
+ 'text': 1,
+ 'boolean': 1,
+}
+
+
+class FilePData:
+ """Serializer of the 'data' attribute of OFS.File and OFS.Image"""
+
+ __implements__ = ISerializer
+
+ schema = ColumnSchema('data', 'string')
+
+ def can_serialize(self, object):
+ return isinstance(object, File)
+
+ def serialize(self, event):
+ obj = event.obj
+ event.serialized('data', obj.data, 1)
+ # size/width/height are derived from the data; no need to store.
+ event.ignore(('size', 'width', 'height'))
+ # str() flattens obj.data into a single string -- presumably
+ # collapsing OFS.Image Pdata chains; confirm against OFS.Image.
+ return str(obj.data)
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ data, size = obj._read_data(state)
+ if not obj.__dict__.get('content_type'):
+ # Guess the content type.
+ content_type = obj._get_content_type(
+ state, data, obj.__name__)
+ else:
+ # The properties serializer is authoritative. Defer to it.
+ content_type = None
+ obj.update_data(data, content_type, size)
+ event.deserialized('data', obj.data)
+
+
+class FolderItems:
+ """Zope 2 folder items (de)serializer
+
+ Serializes the contents of an ObjectManager as (key, oid,
+ classification) rows, one per contained object.
+ """
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('key', 'string', 1)
+ schema.add('oid', 'string')
+ schema.add('classification', 'classification')
+
+ def can_serialize(self, obj):
+ return isinstance(obj, ObjectManager)
+
+ def serialize(self, event):
+ obj = event.obj
+ assert isinstance(obj, ObjectManager), repr(obj)
+ state = []
+ # _objects is reconstructed on deserialize; don't store it.
+ event.ignore('_objects')
+ d = obj.__dict__
+ for id in obj.objectIds():
+ if d.has_key(id):
+ base = d[id]
+ else:
+ # Fall back to _getOb.
+ base = aq_base(obj._getOb(id))
+ oid = event.obj_db.identify(base)
+ if oid is None:
+ oid = event.obj_db.new_oid()
+ event.referenced(id, base, True, oid)
+ # No need to pass classification.
+ state.append((id, oid, None))
+ return state
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(obj, ObjectManager), obj
+ for (id, oid, classification) in state:
+ subob = event.resolve(id, oid, classification)
+ obj._setOb(id, subob)
+ # Rebuild the _objects tuple that ObjectManager keeps in sync
+ # with its contained items.
+ obj._objects += ({
+ 'id': id,
+ 'meta_type': subob.__class__.meta_type,
+ },)
+
+
+class IdAttribute:
+ """Zope 2 id attribute.
+
+ Stores the object's id, which lives in either 'id' or '__name__'
+ depending on the class (Item_w__name__ subclasses use '__name__').
+ """
+
+ __implements__ = ISerializer
+
+ schema = ColumnSchema('id', 'string')
+
+ def can_serialize(self, obj):
+ return 1
+
+ def _get_attr_name_for(self, obj):
+ # Item_w__name__ subclasses keep their id in '__name__'.
+ if isinstance(obj, Item_w__name__):
+ return '__name__'
+ else:
+ return 'id'
+
+ def serialize(self, event):
+ obj = event.obj
+ attrname = self._get_attr_name_for(obj)
+ id = getattr(obj, attrname)
+ if not id:
+ raise SerializationError('ID of %r is %r' % (obj, id))
+ event.serialized(attrname, id, 1)
+ return id
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ attrname = self._get_attr_name_for(obj)
+ setattr(obj, attrname, state)
+ # Allow references under either attribute name.
+ event.deserialized('id', state)
+ event.deserialized('__name__', state)
+
+
+class AutoBindings:
+ """For classes that extend Shared.DC.Scripts.Bindings.Bindings.
+
+ Discards the name bindings at serialization time and re-creates
+ them at deserialization.
+ """
+
+ __implements__ = ISerializer
+
+ schema = None # No storage
+
+ def can_serialize(self, obj):
+ return 1
+
+ def serialize(self, event):
+ # _bind_names can always be recomputed; don't store it.
+ event.ignore('_bind_names')
+ return None
+
+ def deserialize(self, event, state):
+ assert state is None, state
+ # prepare the _bind_names attribute (created as a side effect)
+ event.obj.getBindingAssignments()
+
+
+class OFSProperties:
+ """Serializer for OFS.PropertyManager properties.
+
+ Each property becomes an (id, type, data) row.  Simple types are
+ stored as plain strings; everything else is pickled together with
+ any extra property metadata (e.g. select/multi-select vocabularies).
+ """
+
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('id', 'string', 1)
+ schema.add('type', 'string')
+ schema.add('data', 'string')
+
+ def can_serialize(self, obj):
+ return isinstance(obj, PropertyManager)
+
+ def serialize(self, event):
+ res = []
+ obj = event.obj
+ assert isinstance(obj, PropertyManager), repr(obj)
+ # Guard against classes that override _propertyMap() to return
+ # something other than the _properties attribute.
+ assert obj._properties is obj._propertyMap()
+ event.ignore('_properties')
+ for p in obj._properties:
+ name = p['id']
+ t = p['type']
+ event.ignore(name)
+ data = obj.getProperty(name)
+ if t == 'lines':
+ v = '\n'.join(data)
+ elif t == 'boolean':
+ v = data and '1' or '0'
+ elif string_repr_types.get(t):
+ v = str(data)
+ else:
+ # Pickle the value and any extra info about the property.
+ # Extra info is present in select and multi-select properties.
+ d = p.copy()
+ del d['id']
+ del d['type']
+ if d.has_key('mode'):
+ del d['mode']
+ d['value'] = data
+ v = dumps(d)
+ res.append((name, t, v))
+ return res
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(obj, PropertyManager)
+ assert obj._properties is obj._propertyMap()
+ if not state:
+ # No stored properties. Revert the object to its
+ # class-defined property schema.
+ if obj.__dict__.has_key('_properties'):
+ del obj._properties
+ return
+
+ # First pass: rebuild the property schema (_properties), merging
+ # stored metadata with any existing definitions.
+ old_props = obj.propdict()
+ new_props = {}
+ for id, t, v in state:
+ p = old_props.get(id)
+ if p is None:
+ p = {'mode': 'wd'}
+ else:
+ p = p.copy()
+ p['id'] = id
+ p['type'] = t
+ if v and not string_repr_types.get(t) and t != 'lines':
+ # v is a pickle.
+ # Check the pickle for extra property info.
+ d = loads(v)
+ if isinstance(d, DictType):
+ del d['value']
+ if d:
+ # The data is stored with extra property info.
+ p.update(d)
+ new_props[id] = p
+
+ # Only replace the schema when it actually changed, so objects
+ # matching their class-defined schema keep it.
+ if old_props != new_props:
+ obj._properties = tuple(new_props.values())
+
+ # Second pass: restore the property values themselves.
+ for id, t, v in state:
+ if t == 'lines':
+ data = v.split('\n')
+ elif t == 'boolean':
+ # match 0, [f]alse, [n]o
+ if (not v or v == '0' or v[:1].lower() in 'fn'):
+ data = 0
+ else:
+ data = 1
+ elif string_repr_types.get(t):
+ data = str(v)
+ elif v:
+ d = loads(v)
+ if isinstance(d, DictType):
+ # The data is stored with extra property info.
+ data = d['value']
+ else:
+ data = d
+ else:
+ # Fall back to a default.
+ data = ''
+ obj._updateProperty(id, data)
+
+
+class OptionalOFSProperties(OptionalSerializer):
+ """OFSProperties wrapped so non-PropertyManager objects are tolerated."""
+
+ def __init__(self):
+ OptionalSerializer.__init__(self, OFSProperties(), [])
+
Property changes on: Products.Ape/trunk/lib/apelib/zope2/ofsserial.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/products.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/products.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/products.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,81 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Serializers for Zope products
+
+$Id$
+"""
+
+from apelib.core.interfaces import ISerializer
+from apelib.core.schemas import RowSequenceSchema
+from apelib.zodb3.serializers import find_unmanaged
+from apelib.zope2.ofsserial import FolderItems
+
+
+class BTreeFolder2Items:
+ """BTreeFolder2 items (de)serializer
+
+ Like FolderItems, but reads contained objects from the folder's
+ _tree BTree and treats the BTree internals as unmanaged persistent
+ objects.
+ """
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('key', 'string', 1)
+ schema.add('oid', 'string')
+ schema.add('classification', 'classification')
+
+ def can_serialize(self, obj):
+ # Duck-typed: any object with a _tree attribute qualifies.
+ return hasattr(obj, '_tree')
+
+ def serialize(self, event):
+ obj = event.obj
+ assert self.can_serialize(obj)
+ state = []
+ event.ignore('_objects')
+ d = obj._tree
+ # These are all derived bookkeeping; rebuilt on deserialize.
+ event.ignore(('_tree', '_mt_index', '_count'))
+ for id in obj.objectIds():
+ base = d[id]
+ oid = event.obj_db.identify(base)
+ if oid is None:
+ oid = event.obj_db.new_oid()
+ event.referenced(id, base, True, oid)
+ # No need to pass classification.
+ state.append((id, oid, None))
+ # The structure that makes up the BTree (the root node and
+ # the buckets) are unmanaged. Tell the event about them.
+ event.upos.extend(find_unmanaged(obj._tree, obj._tree.values()))
+ return state
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ if hasattr(obj, '_initBTrees'):
+ # Version 1.0.1+ of BTreeFolder2
+ obj._initBTrees()
+ else:
+ # Crufty workaround for older versions
+ obj.__init__(obj.id)
+ assert self.can_serialize(obj)
+ for (id, oid, classification) in state:
+ subob = event.resolve(id, oid, classification)
+ obj._setOb(id, subob)
+ # The tree and the buckets are unmanaged.
+ event.upos.extend(find_unmanaged(obj._tree, obj._tree.values()))
+
+
+class ContainerTabItems (FolderItems):
+ """DCWorkflow.ContainerTab items (de)serializer"""
+
+ def deserialize(self, event, state):
+ # This object needs a little help with initialization:
+ # ContainerTab stores its items in _mapping, which must exist
+ # before FolderItems calls _setOb().
+ event.obj._mapping = {}
+ FolderItems.deserialize(self, event, state)
+
Property changes on: Products.Ape/trunk/lib/apelib/zope2/products.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/scripts.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/scripts.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/scripts.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,174 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Zope 2 script serialization
+
+$Id$
+"""
+
+import re
+from types import StringType
+
+from Products.PythonScripts.PythonScript import PythonScript
+from Products.ZSQLMethods.SQL import SQL, SQLConnectionIDs
+from Shared.DC.ZRDB.Aqueduct import parse
+# Older Zope releases provide a top-level IOBTree module; when it is
+# absent, fall back to a plain-dict factory with the same call signature.
+try:
+ from IOBTree import Bucket
+except ImportError:
+ Bucket = lambda: {}
+
+from apelib.core.interfaces import ISerializer
+from apelib.core.schemas import ColumnSchema, RowSequenceSchema
+
+
+class PythonScriptSerializer:
+ """Serializer for PythonScripts.
+
+ PythonScriptSerializer serializes using the same representation
+ as FTP or WebDAV. All computable attributes like compiled code
+ are dropped.
+ """
+
+ __implements__ = ISerializer
+
+ schema = ColumnSchema('data', 'string')
+
+ def can_serialize(self, obj):
+ return isinstance(obj, PythonScript)
+
+ def serialize(self, event):
+ assert isinstance(event.obj, PythonScript)
+ # read() renders the script body plus its parameter/binding
+ # header comments -- the same text FTP/WebDAV would deliver.
+ data = event.obj.read()
+ assert isinstance(data, StringType)
+ # All of these are recomputed by write()/_makeFunction().
+ event.ignore((
+ 'title', '_params', '_body', '_bind_names',
+ 'warnings', 'errors', '_code', 'Python_magic',
+ 'Script_magic', 'func_defaults', 'func_code',
+ 'co_varnames', 'co_argcount',
+ ))
+ return data
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(state, StringType)
+ assert isinstance(event.obj, PythonScript)
+ # Circumvent proxy role checking while deserializing the script.
+ obj._validateProxy = lambda: 0
+ try:
+ obj.write(state)
+ obj._makeFunction()
+ finally:
+ # Remove the proxy circumvention
+ del obj._validateProxy
+
+
+
+class ZSQLMethodSerializer:
+ """Serializer for ZSQLMethods.
+
+ ZSQLMethodSerializer serializes using the same representation
+ as FTP or WebDAV. All computable attributes like compiled code
+ are dropped.
+ """
+
+ __implements__ = ISerializer
+
+ schema = ColumnSchema('data', 'string')
+
+ # Matches an optional leading <params>...</params> block in the
+ # document source (case-insensitive, dot matches newlines).
+ params_re = re.compile(r'\s*<params>(.*)</params>\s*\n', re.I | re.S)
+
+ def can_serialize(self, obj):
+ return isinstance(obj, SQL)
+
+ def serialize(self, event):
+ data = event.obj.document_src()
+ # All derivable from the document source; rebuilt on deserialize.
+ event.ignore(('_arg', 'template', 'arguments_src', 'src'))
+ return data
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(state, StringType)
+ assert isinstance(obj, SQL)
+ body = state
+ # Split the <params> header (if any) from the SQL template body.
+ m = self.params_re.match(body)
+ if m:
+ obj.arguments_src = m.group(1)
+ body = body[m.end():]
+ else:
+ obj.arguments_src = ''
+ obj._arg = parse(obj.arguments_src)
+ obj.src = body
+ obj.template = obj.template_class(body)
+ obj.template.cook()
+ # Reset the volatile result cache.
+ obj._v_cache = ({}, Bucket())
+ if not hasattr(obj, 'connection_id'):
+ obj.connection_id = ''
+
+
+class ZSQLMethodPropertiesSerializer:
+ """Serializer for the simple attributes of a ZSQLMethod.
+
+ Each attribute in 'attributes' becomes an (id, type, data) row;
+ the factory converts the stored string back on deserialize.
+ """
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('id', 'string', 1)
+ schema.add('type', 'string')
+ schema.add('data', 'string')
+
+ # attribute name -> conversion factory used on deserialize.
+ attributes = {
+ 'title': str,
+ 'connection_id': str,
+ 'max_rows_': int,
+ 'max_cache_': int,
+ 'cache_time': int,
+ 'class_name_': str,
+ 'class_file_': str,
+ 'zclass': str, # XXX: purpose unclear; kept for compatibility
+ 'allow_simple_one_argument_traversal': int,
+ 'connection_hook': str,
+ }
+
+ def can_serialize(self, obj):
+ return isinstance(obj, SQL)
+
+ def serialize(self, event):
+ obj = event.obj
+ assert isinstance(obj, SQL)
+ res = []
+ for attribute, factory in self.attributes.items():
+ if not hasattr(obj, attribute):
+ continue
+ value = getattr(obj, attribute)
+ t = factory.__name__
+ if value is None:
+ # Normalize None to a storable default for the type.
+ if factory in (int, long):
+ value = 0
+ else:
+ value = ''
+ value = str(value)
+ event.serialized(attribute, value, 1)
+ res.append((attribute, t, value))
+ event.ignore('_col')
+ return res
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(obj, SQL)
+ for attribute, t, value in state:
+ factory = self.attributes.get(attribute)
+ if factory is None:
+ # Unknown attribute in storage; skip rather than guess.
+ continue
+ value = factory(value)
+ setattr(obj, attribute, value)
+ event.deserialized(attribute, value)
+ if not hasattr(obj, 'connection_id'):
+ obj.connection_id = ''
+
Property changes on: Products.Ape/trunk/lib/apelib/zope2/scripts.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/security.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/security.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/security.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,269 @@
+##############################################################################
+#
+# Copyright (c) 2003 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Zope 2 security information serializers.
+
+$Id$
+"""
+
+from types import TupleType
+
+from Persistence import PersistentMapping
+from AccessControl.User import User, UserFolder
+from AccessControl.Permission import pname
+import Products
+
+from apelib.core.interfaces import ISerializer
+from apelib.core.schemas import RowSequenceSchema
+
+
+# Module-level cache for get_permission_dict(); built once per process.
+_permission_dict_cache = None
+
+def get_permission_dict():
+ """Returns a dictionary mapping permission attribute name to permission.
+
+ Does not discover permissions defined in ZClass products, since that
+ would require access to the Zope application in the database.
+ """
+ global _permission_dict_cache
+ if _permission_dict_cache is not None:
+ return _permission_dict_cache
+ res = {}
+ for item in Products.__ac_permissions__:
+ p = item[0]
+ # pname() turns a permission title into its attribute name.
+ attr = pname(p)
+ res[attr] = p
+ _permission_dict_cache = res
+ return res
+
+
+## Declaration types:
+##
+## executable owner
+## "executable-owner", "", "", path/to/userfolder/username
+## local roles
+## "local-role", role_name, "", username
+## user-defined roles
+## "define-role", role_name, "", ""
+## proxy roles
+## "proxy-role", role_name, "", ""
+## permission mapping
+## "permission-role", role_name, permission_name, ""
+## "permission-no-acquire", "", permission_name, ""
+
+
+
+class SecurityAttributes:
+ """Zope 2 security attribute serializer.
+
+ Flattens instance-level security settings (executable owner, role
+ definitions, local roles, proxy roles, permission-role mappings)
+ into (declaration_type, role, permission, username) rows; see the
+ declaration-type table above the class for the row formats.
+ """
+
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('declaration_type', 'string')
+ schema.add('role', 'string')
+ schema.add('permission', 'string')
+ schema.add('username', 'string')
+
+ def can_serialize(self, obj):
+ return 1
+
+ def serialize(self, event):
+ res = []
+
+ # Get security attributes from the instance only, not the class.
+ # There's no need to serialize the class attributes.
+ obj_d = event.obj.__dict__
+ eo = obj_d.get('_owner')
+ if eo is not None:
+ event.ignore('_owner')
+ path, username = eo
+ if '/' in username:
+ # '/' is the path separator in the stored representation.
+ raise ValueError, '/ not allowed in user names'
+ s = '%s/%s' % ('/'.join(path), username)
+ res.append(('executable-owner', '', '', s))
+
+ roles = obj_d.get('__ac_roles__')
+ if roles is not None:
+ event.ignore('__ac_roles__')
+ roles = list(roles)
+ roles.sort()
+ # Only store role definitions that differ from the class's.
+ class_roles = getattr(event.obj.__class__, '__ac_roles__', None)
+ if class_roles:
+ class_roles = list(class_roles)
+ class_roles.sort()
+ if roles != class_roles:
+ for role in roles:
+ res.append(('define-role', role, '', ''))
+ # else inherit roles from the class
+
+ local_roles = obj_d.get('__ac_local_roles__')
+ if local_roles is not None:
+ event.ignore('__ac_local_roles__')
+ for username, roles in local_roles.items():
+ for role in roles:
+ res.append(('local-role', role, '', username))
+
+ proxy_roles = obj_d.get('_proxy_roles')
+ if proxy_roles is not None:
+ event.ignore('_proxy_roles')
+ for role in proxy_roles:
+ res.append(('proxy-role', role, '', ''))
+
+ # Permission attributes look like '_..._Permission'; resolve them
+ # back to permission titles via the (lazily built) lookup table.
+ p_dict = None
+ for attr, value in obj_d.items():
+ if attr.endswith('_Permission') and attr.startswith('_'):
+ if p_dict is None:
+ p_dict = get_permission_dict()
+ p = p_dict.get(attr)
+ if p is not None:
+ event.ignore(attr)
+ for role in value:
+ res.append(('permission-role', role, p, ''))
+ # List means acquired, tuple means not acquired.
+ if isinstance(value, TupleType):
+ res.append(('permission-no-acquire', '', p, ''))
+
+ return res
+
+
+ def deserialize(self, event, state):
+ # Accumulators; applied to the object only after all rows parse.
+ local_roles = {} # { username -> [role,] }
+ defined_roles = [] # [role,]
+ proxy_roles = [] # [role,]
+ permission_roles = {} # { permission -> [role,] }
+ permission_acquired = {} # { permission -> 0 or 1 }
+
+ obj = event.obj
+ for decl_type, role, permission, username in state:
+ if decl_type == 'executable-owner':
+ assert not role
+ assert not permission
+ #assert username
+ # username is 'path/to/userfolder/name'; split at the last '/'.
+ pos = username.rfind('/')
+ if pos < 0:
+ # Default to the root folder
+ ufolder = ['acl_users']
+ uname = username
+ else:
+ ufolder = list(username[:pos].split('/'))
+ uname = username[pos + 1:]
+ assert ufolder
+ assert uname
+ obj._owner = (ufolder, uname)
+
+ elif decl_type == 'local-role':
+ #assert role
+ assert not permission
+ #assert username
+ r = local_roles.get(username)
+ if r is None:
+ r = []
+ local_roles[username] = r
+ r.append(role)
+
+ elif decl_type == 'define-role':
+ #assert role
+ assert not permission
+ assert not username
+ defined_roles.append(role)
+
+ elif decl_type == 'proxy-role':
+ #assert role
+ assert not permission
+ assert not username
+ proxy_roles.append(role)
+
+ elif decl_type == 'permission-role':
+ #assert role
+ #assert permission
+ assert not username
+ r = permission_roles.get(permission)
+ if r is None:
+ r = []
+ permission_roles[permission] = r
+ r.append(role)
+ # Default to acquired unless a 'permission-no-acquire' row
+ # says otherwise.
+ if not permission_acquired.has_key(permission):
+ permission_acquired[permission] = 1
+
+ elif decl_type == 'permission-no-acquire':
+ assert not role
+ #assert permission
+ assert not username
+ permission_acquired[permission] = 0
+
+ else:
+ raise ValueError, (
+ 'declaration_type %s unknown' % repr(decl_type))
+
+ if local_roles:
+ obj.__ac_local_roles__ = local_roles
+ if defined_roles:
+ defined_roles.sort()
+ obj.__ac_roles__ = tuple(defined_roles)
+ if proxy_roles:
+ obj._proxy_roles = tuple(proxy_roles)
+
+ # List means acquired, tuple means not acquired (mirrors serialize).
+ for p, acquired in permission_acquired.items():
+ roles = permission_roles.get(p, [])
+ if not acquired:
+ roles = tuple(roles)
+ setattr(obj, pname(p), roles)
+
+
+
+class UserFolderSerializer:
+ """Serializer for a user folder.
+
+ This version lets the application keep a list of all users in RAM.
+ Each user becomes an (id, password, roles, domains) row.
+ """
+
+ __implements__ = ISerializer
+
+ schema = RowSequenceSchema()
+ schema.add('id', 'string', 1)
+ schema.add('password', 'string')
+ schema.add('roles', 'string:list')
+ schema.add('domains', 'string:list')
+
+ def can_serialize(self, obj):
+ return isinstance(obj, UserFolder)
+
+ def serialize(self, event):
+ obj = event.obj
+ assert isinstance(obj, UserFolder), repr(obj)
+ state = []
+ event.ignore('data')
+ for id, user in obj.data.items():
+ assert isinstance(user, User), repr(user)
+ # A plain User carries exactly 4 attributes; anything more
+ # would be silently lost by this row format.
+ assert len(user.__dict__.keys()) == 4, user.__dict__.keys()
+ # Sort for a stable, diff-friendly representation.
+ r = list(user.roles)
+ r.sort()
+ d = list(user.domains)
+ d.sort()
+ # user.__ is the stored password attribute.
+ state.append((id, user.__, tuple(r), tuple(d)))
+ event.serialized(id, user, 0)
+ # The mapping and the User objects are unmanaged persistent data.
+ event.upos.append(obj.data)
+ event.upos.extend(obj.data.values())
+ return state
+
+ def deserialize(self, event, state):
+ obj = event.obj
+ assert isinstance(obj, UserFolder)
+ obj.data = PersistentMapping()
+ for id, password, roles, domains in state:
+ user = User(id, password, roles, domains)
+ obj.data[id] = user
+ event.deserialized(id, user)
+ event.upos.append(obj.data)
+ event.upos.extend(obj.data.values())
Property changes on: Products.Ape/trunk/lib/apelib/zope2/security.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/setup/__init__.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/setup/__init__.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/setup/__init__.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,17 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Run-time Zope 2 configuration modules.
+
+$Id$
+"""
Property changes on: Products.Ape/trunk/lib/apelib/zope2/setup/__init__.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/lib/apelib/zope2/setup/patches.py
===================================================================
--- Products.Ape/trunk/lib/apelib/zope2/setup/patches.py (rev 0)
+++ Products.Ape/trunk/lib/apelib/zope2/setup/patches.py 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1,40 @@
+##############################################################################
+#
+# Copyright (c) 2002 Zope Corporation and Contributors.
+# All Rights Reserved.
+#
+# This software is subject to the provisions of the Zope Public License,
+# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
+# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
+# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
+# FOR A PARTICULAR PURPOSE.
+#
+##############################################################################
+"""Monkey-patches necessary to make apelib work in Zope.
+
+$Id$
+"""
+
+from Acquisition import aq_base
+
+from apelib.zodb3.utils import zodb_copy
+
+
+def apply_copy_support_patch():
+ # Fall back to copying by pickle when ZEXP export/import is not
+ # implemented.
+ def _getCopy(self, container):
+ try:
+ return self._real_getCopy(container)
+ except NotImplementedError:
+ return zodb_copy(aq_base(self))
+
+ from OFS.CopySupport import CopySource
+ if not hasattr(CopySource, '_real_getCopy'):
+ CopySource._real_getCopy = CopySource._getCopy
+ CopySource._getCopy = _getCopy
+
+
+def apply_patches():
+    """Install all monkey patches apelib needs in this Zope process."""
+    apply_copy_support_patch()
Property changes on: Products.Ape/trunk/lib/apelib/zope2/setup/patches.py
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
Added: Products.Ape/trunk/version.txt
===================================================================
--- Products.Ape/trunk/version.txt (rev 0)
+++ Products.Ape/trunk/version.txt 2007-09-12 13:28:43 UTC (rev 79589)
@@ -0,0 +1 @@
+Ape-1.1
Property changes on: Products.Ape/trunk/version.txt
___________________________________________________________________
Name: svn:keywords
+ Id Rev Date
Name: svn:eol-style
+ native
More information about the Checkins
mailing list